#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

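/*
 * The hardware status page is a page of memory that the GPU writes
 * asynchronously (e.g. breadcrumb seqnos): page_addr is the kernel's
 * CPU mapping, gfx_addr the page's address in the GPU's address space,
 * and obj the backing GEM object.
 */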
struct intel_hw_status_page {
	void		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_gem_object *obj;
};

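/*
 * Per-ring MMIO accessors: each ring's TAIL, START, HEAD and CTL
 * registers live at a fixed offset from that ring's mmio_base.
 */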
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)

struct drm_i915_gem_execbuffer2;
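/*
 * One intel_ring_buffer exists per hardware engine (render, BSD video,
 * BLT).  The function pointers below let each ring supply its own
 * implementation of initialization, tail updates, cache flushing,
 * request emission and batchbuffer dispatch.
 */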
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;
	u32		mmio_base;
	unsigned long	size;
	void		*virtual_start;
	struct		drm_device *dev;
	struct		drm_gem_object *gem_object;

	unsigned int	head;
	unsigned int	tail;
	int		space;
	struct intel_hw_status_page status_page;

	u32		irq_gem_seqno;		/* last seq seen at irq time */
	u32		waiting_gem_seqno;
	int		user_irq_refcount;
	void		(*user_irq_get)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct drm_device *dev,
			struct intel_ring_buffer *ring);

	int		(*init)(struct drm_device *dev,
			struct intel_ring_buffer *ring);

	void		(*set_tail)(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    u32 value);
	void		(*flush)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			u32 invalidate_domains,
			u32 flush_domains);
	u32		(*add_request)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			u32 flush_domains);
	u32		(*get_seqno)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_i915_gem_execbuffer2 *exec,
			struct drm_clip_rect *cliprects,
			uint64_t exec_offset);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	bool outstanding_lazy_request;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;
};

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	u32 *regs = ring->status_page.page_addr;
	return regs[reg];
}
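/*
 * Illustrative note, not taken from this header: a ring's get_seqno()
 * hook typically returns the last breadcrumb the GPU wrote into the
 * status page, e.g. intel_read_status_page(ring, I915_GEM_HWS_INDEX),
 * with I915_GEM_HWS_INDEX coming from i915_drv.h.
 */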

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct drm_device *dev,
				   struct intel_ring_buffer *ring,
				   unsigned int data)
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	*virt = data;
	ring->tail += 4;
}

void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring);
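
/*
 * A minimal usage sketch, assuming the begin/emit/advance convention
 * used by this driver: intel_ring_begin() reserves space for n dwords
 * (waiting for the GPU to consume old commands if the ring is full),
 * intel_ring_emit() writes one command dword at the current tail, and
 * intel_ring_advance() publishes the new tail to the hardware.  The
 * helper below is hypothetical and only for illustration; MI_NOOP is
 * the no-op command dword from i915_reg.h.
 */
static inline void intel_ring_emit_noops(struct drm_device *dev,
					 struct intel_ring_buffer *ring,
					 int num_dwords)
{
	int i;

	/* Reserve ring space first; this may block on the GPU. */
	intel_ring_begin(dev, ring, num_dwords);
	for (i = 0; i < num_dwords; i++)
		intel_ring_emit(dev, ring, MI_NOOP);
	/* Kick the hardware by moving the ring's tail pointer. */
	intel_ring_advance(dev, ring);
}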

u32 intel_ring_get_seqno(struct drm_device *dev,
			 struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct drm_device *dev,
			       struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */