#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

struct drm_printer;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
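
/*
 * For example (a sketch that follows directly from the macros above, with
 * gen8_semaphore_seqno_size == sizeof(u64)): the qword that engine "from"
 * writes for engine "to" lives at
 *
 *	(from * I915_NUM_ENGINES + to) * sizeof(u64)
 *
 * bytes into the semaphore object, so each signalling engine owns a
 * contiguous run of I915_NUM_ENGINES qwords.
 */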

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
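
/*
 * Illustrative use (a sketch only; the fields come from struct
 * intel_instdone below and intel_engine_get_instdone() is declared later
 * in this header):
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
 *			slice, subslice,
 *			instdone.sampler[slice][subslice]);
 */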

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position, also helpful in case
 * we want to have multiple batches at different offsets based on
 * some criteria. It is not a requirement at the moment but provides
 * an option for future use.
 * size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;

/*
 * Engine ID definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)
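
/*
 * A sketch of how the low bits of the request pointer carry the
 * submission count (using only the helpers defined directly above):
 *
 *	port_set(port, port_pack(rq, count));
 *	...
 *	rq = port_unpack(port, &count);
 *
 * i.e. port_request() yields the request with the count bits masked
 * off, while port_count() yields just those EXECLIST_COUNT_BITS bits.
 */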

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct drm_i915_gem_object *default_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90)  | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90)  | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, that the tracking is updated along
	 * with the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}
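
/*
 * A sketch of the intended flow (as implied by the helper above): on a
 * context-complete event for the oldest submission, the caller hands
 * port[0] to execlists_port_complete(), which shifts the remaining
 * ports down by one and clears the last slot, so port[0] always names
 * the oldest outstanding submission.
 */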

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX	0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}
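
/*
 * For example (illustrative only, and relying on ring->size being a
 * power of two): with a 4096 byte ring, an addr 16 bytes past
 * ring->vaddr yields intel_ring_offset() == 16, while an addr exactly
 * ring->size bytes in wraps back to 0 rather than returning ring->size,
 * which the comment in intel_ring_offset() warns would hang some GPUs
 * if written as a tail.
 */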

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual
	 * RING_HEAD; the real head may have advanced since, but in the worst
	 * case it is still equal to ring->head, and so we should never
	 * program RING_TAIL to advance into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords or 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
Chris Wilson688e6c72016-07-01 17:23:15 +0100877
Tvrtko Ursulin9f235df2017-02-16 12:23:25 +0000878static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
879{
880 memset(batch, 0, 6 * sizeof(u32));
881
882 batch[0] = GFX_OP_PIPE_CONTROL(6);
883 batch[1] = flags;
884 batch[2] = offset;
885
886 return batch + 6;
887}
888
MichaƂ Winiarskidf77cd82017-10-25 22:00:15 +0200889static inline u32 *
890gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
891{
892 /* We're using qword write, offset should be aligned to 8 bytes. */
893 GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
894
895 /* w/a for post sync ops following a GPGPU operation we
896 * need a prior CS_STALL, which is emitted by the flush
897 * following the batch.
898 */
899 *cs++ = GFX_OP_PIPE_CONTROL(6);
900 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
901 PIPE_CONTROL_QW_WRITE;
902 *cs++ = gtt_offset;
903 *cs++ = 0;
904 *cs++ = value;
905 /* We're thrashing one dword of HWS. */
906 *cs++ = 0;
907
908 return cs;
909}
910
911static inline u32 *
912gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
913{
914 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
915 GEM_BUG_ON(gtt_offset & (1 << 5));
916 /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
917 GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
918
919 *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
920 *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
921 *cs++ = 0;
922 *cs++ = value;
923
924 return cs;
925}

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);

#endif /* _INTEL_RINGBUFFER_H_ */