#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
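
/*
 * Illustrative sketch only, not part of the driver API: the usual ring
 * free-space computation (mirroring the ring_space() helper in
 * intel_ringbuffer.c), holding back I915_RING_FREE_SPACE bytes so that
 * the head and tail never end up on the same cacheline. The helper name
 * and the bare head/tail/size parameters are hypothetical.
 */
static inline int example_ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;	/* tail has wrapped around the ring */
	return space;
}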

struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	bool deadlock;
};

struct intel_ring_buffer {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32 mmio_base;
	void __iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we advance last_retired_head to that
	 * request's tail, as the GPU must have finished processing the
	 * request and so we know we can advance the ringbuffer up to
	 * that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
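	/* A sketch of how a consumer uses the value, mirroring the
	 * wait-for-space path in intel_ringbuffer.c (where ring_space()
	 * is defined); illustrative only:
	 *
	 *	if (ring->last_retired_head != -1) {
	 *		ring->head = ring->last_retired_head;
	 *		ring->last_retired_head = -1;
	 *		ring->space = ring_space(ring);
	 *	}
	 */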

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32 trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void (*irq_put)(struct intel_ring_buffer *ring);

	int (*init)(struct intel_ring_buffer *ring);

	void (*write_tail)(struct intel_ring_buffer *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_ring_buffer *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_ring_buffer *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
				   u64 offset, u32 length,
				   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_ring_buffer *ring);

	struct {
		u32 sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32 wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32 signal[I915_NUM_RINGS];
		} mbox;

		/* AKA wait() */
		int (*sync_to)(struct intel_ring_buffer *ring,
			       struct intel_ring_buffer *to,
			       u32 seqno);
		int (*signal)(struct intel_ring_buffer *signaller,
			      /* num_dwords needed by caller */
			      unsigned int num_dwords);
	} semaphore;

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
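
/*
 * Illustrative sketch only: a hypothetical get_cmd_length_mask()
 * implementation. The real per-ring versions live alongside the command
 * tables in i915_cmd_parser.c; the decode below (top three header bits
 * select the instruction client, MI commands keep their dword length in
 * the low bits) is a simplified assumption, not the full decode.
 */
static inline u32 example_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> 29;	/* instruction client bits */

	if (client == 0)		/* MI client */
		return 0x3f;		/* length lives in bits 5:0 */

	return 0;			/* unrecognized: reject the command */
}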

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}
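
/*
 * Illustrative use (sketch): ring flags compose into per-ring bitmasks,
 * e.g. accumulating the set of rings touched by some piece of work:
 *
 *	unsigned mask = 0;
 *	mask |= intel_ring_flag(ring);
 */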

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * The index is the distance from @ring to @other, minus one,
	 * taken cyclically over I915_NUM_RINGS, e.g. with three rings:
	 * cs -> 0 = vcs, 1 = bcs,
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
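
/*
 * Illustrative sketch (hypothetical helper): the last seqno this ring
 * has seen signalled from @other is recorded in semaphore.sync_seqno,
 * indexed with intel_ring_sync_index(), mirroring the bookkeeping done
 * around semaphore.sync_to in i915_gem.c.
 */
static inline u32 example_last_synced_seqno(struct intel_ring_buffer *ring,
					    struct intel_ring_buffer *other)
{
	return ring->semaphore.sync_seqno[intel_ring_sync_index(ring, other)];
}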

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
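
/*
 * Illustrative sketch (hypothetical helper): the breadcrumb seqno that
 * the GPU last wrote via MI_STORE_DWORD_INDEX lands in the driver-owned
 * area at I915_GEM_HWS_INDEX and is read like any other status-page
 * dword.
 */
static inline u32 example_read_hws_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}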

void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
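
/*
 * Illustrative sketch (hypothetical helper): commands are written into
 * the ring with the begin/emit/advance pattern shown above. The
 * two-dword payload here is made up; callers are assumed to hold the
 * usual locking.
 */
static inline int example_emit_two_dwords(struct intel_ring_buffer *ring,
					  u32 cmd, u32 data)
{
	int ret;

	ret = intel_ring_begin(ring, 2); /* reserve two dwords, may wait */
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, data);
	intel_ring_advance(ring);	/* keep tail inside the ring */
	return 0;
}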

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */