#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
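
/*
 * These accessors, like the underlying I915_READ()/I915_WRITE(), assume
 * a dev_priv variable is in scope at the call site. A minimal usage
 * sketch (illustrative only; HEAD_ADDR comes from i915_reg.h):
 *
 *	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 *	u32 head = I915_READ_HEAD(ring) & HEAD_ADDR;
 *	I915_WRITE_TAIL(ring, ring->tail);
 */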

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we update last_retired_head, as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;
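
	/*
	 * A sketch of how a waiter may consume this value (illustrative
	 * only; ring_space() is a helper local to intel_ringbuffer.c):
	 *
	 *	if (ring->last_retired_head != -1) {
	 *		ring->head = ring->last_retired_head;
	 *		ring->last_retired_head = -1;
	 *		ring->space = ring_space(ring);
	 *	}
	 */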

	u32		irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
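	/*
	 * E.g. a periodic hangcheck poll can pass lazy_coherency=true and
	 * tolerate a slightly stale result, while a waiter deciding
	 * whether a request has completed should pass false and pay for
	 * the coherent read.
	 */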
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3]; /* our mbox written by others */
	u32		signal_mbox[2]; /* mboxes this ring signals to */
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct drm_i915_gem_object *last_context_obj;

	void *private;
};

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}
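
/*
 * E.g. for the render ring intel_ring_flag() returns 1 << RCS == 0x1,
 * so the per-ring flags can be OR'd together into a bitmask of rings.
 */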

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs
	 * bcs -> 0 = cs, 1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
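
/*
 * Worked example (assuming the rings sit contiguously in the
 * dev_priv->ring[] array, which the pointer arithmetic above relies
 * on): syncing RCS to BCS gives idx = (BCS - RCS) - 1 = 1, matching
 * the table in the function.
 */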

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
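
/*
 * A minimal sketch of reading the status page: the GPU stores the
 * latest breadcrumb at I915_GEM_HWS_INDEX via MI_STORE_DWORD_INDEX,
 * which the default get_seqno() implementations pick up with
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */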

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
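/* Waiting for ring->size - 8 bytes of space can only complete once the
 * ring has drained entirely: head == tail denotes an empty ring, so at
 * most size - 8 bytes are ever free.
 */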
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
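
/*
 * A minimal sketch of the begin/emit/advance pattern (illustrative
 * only, not part of this header's API; MI_NOOP comes from i915_reg.h,
 * and the dword count passed to intel_ring_begin() must match what is
 * then emitted):
 *
 *	int ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */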

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */