#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

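/*
 * Worked example (illustrative only, assuming I915_NUM_ENGINES == 5 and the
 * 8-byte slots above): the qword written when RCS (id 0) signals VCS (id 1)
 * lives at GEN8_SEMAPHORE_OFFSET(0, 1) == (0 * 5 + 1) * 8 == 0x08 from the
 * start of the semaphore object, matching the RCS->VCS entry (0x08) in the
 * GEN8 signal table documented inside struct intel_engine_cs below.
 */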
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

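/*
 * Illustrative use (a sketch, not part of this header): error capture and
 * debugfs code can turn the hangcheck state into something readable, e.g.
 *
 *	DRM_DEBUG_DRIVER("%s hangcheck: %s\n", engine->name,
 *			 hangcheck_action_to_str(engine->hangcheck.action));
 */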
#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

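/*
 * Usage sketch (illustrative only): intel_engine_get_instdone(), declared
 * later in this header, fills a struct intel_instdone; the per-slice/subslice
 * registers can then be walked with the iterator above, e.g.
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG_DRIVER("SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
 *				 slice, subslice,
 *				 instdone.sampler[slice][subslice]);
 */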
struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;

	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *  we want to have multiple batches at different offsets based on some
 *  criteria. It is not a requirement at the moment but provides an
 *  option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */

		spinlock_t lock; /* protects the lists of requests; irqsafe */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	int (*context_pin)(struct intel_engine_cs *engine,
			   struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
		GEM_DEBUG_DECL(u32 context_id);
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along
	 * with the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

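/*
 * Illustrative use (a sketch): intel_engine_flag() turns an engine into a
 * single bit, so a set of engines can be tested against a mask, e.g.
 *
 *	if (busy_engines & intel_engine_flag(engine))
 *		... this engine is still busy ...
 *
 * where "busy_engines" is a hypothetical bitmask accumulated by the caller.
 */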
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}

static inline u32
intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return offset & (req->ring->size - 1);
}

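/*
 * Typical emission pattern (illustrative sketch): callers reserve space with
 * intel_ring_begin(), write their dwords through the returned pointer and
 * close the packet with intel_ring_advance() so the dword count can be
 * cross-checked, e.g. to emit a two-dword packet of MI_NOOPs:
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */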
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->last_submitted_seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);

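/*
 * Waiter usage sketch (illustrative only, much simplified from the real wait
 * loop in i915_gem_request.c): a waiter registers an intel_wait for its
 * target seqno, sleeps until the breadcrumb interrupt wakes it, and then
 * removes itself, e.g.
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */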
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

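/*
 * Usage sketch (illustrative only): gen8+ workaround batch builders emit
 * PIPE_CONTROL packets by chaining calls that advance the dword pointer, e.g.
 *
 *	batch = gen8_emit_pipe_control(batch,
 *				       PIPE_CONTROL_CS_STALL |
 *				       PIPE_CONTROL_DC_FLUSH_ENABLE,
 *				       0);
 *
 * PIPE_CONTROL_CS_STALL and PIPE_CONTROL_DC_FLUSH_ENABLE are flag names
 * assumed to come from i915_reg.h; any valid flags/offset combination follows
 * the same pattern.
 */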
#endif /* _INTEL_RINGBUFFER_H_ */