#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
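/* For reference, with the 64 byte cacheline above CACHELINE_DWORDS works out to 16. */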

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
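/*
 * A worked example of the layout above (assuming I915_NUM_ENGINES is 5 and
 * using the engine ids defined later in this file): the slot that RCS (id 0)
 * writes for VCS (id 2) to wait on lives at
 * GEN8_SEMAPHORE_OFFSET(0, 2) == (0 * 5 + 2) * 8 == 0x10 bytes into the
 * semaphore object; GEN8_SIGNAL_OFFSET()/GEN8_WAIT_OFFSET() simply add the
 * object's GGTT start to that.
 */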

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

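/*
 * A minimal sketch of walking the per-slice/subslice INSTDONE values with
 * the iterator above, assuming a snapshot has been captured with
 * intel_engine_get_instdone() (declared towards the end of this header);
 * the pr_info() reporting is purely illustrative:
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(engine->i915, slice, subslice)
 *		pr_info("sampler[%d][%d] = 0x%08x, row[%d][%d] = 0x%08x\n",
 *			slice, subslice, instdone.sampler[slice][subslice],
 *			slice, subslice, instdone.row[slice][subslice]);
 */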
struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position; also helpful in case
 * we want to have multiple batches at different offsets based on
 * some criteria. It is not a requirement at the moment but provides
 * an option for future use.
 * size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @irq_tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct irq_tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @preempt: are we currently handling a preempting context switch?
	 */
	bool preempt;

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};

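/*
 * A hedged sketch of how the port_*() packing macros above are used on the
 * submission side (the real backend lives in the execlists code; the helper
 * name port_assign() is illustrative):
 *
 *	static void port_assign(struct execlist_port *port,
 *				struct drm_i915_gem_request *rq)
 *	{
 *		if (port_isset(port))
 *			i915_gem_request_put(port_request(port));
 *
 *		port_set(port, port_pack(i915_gem_request_get(rq),
 *					 port_count(port)));
 *	}
 *
 * i.e. the low EXECLIST_COUNT_BITS of the pointer record how many times the
 * port has been submitted to hw, while the remaining bits are the request
 * pointer itself.
 */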
#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];
	enum intel_engine_id id;
	unsigned int uabi_id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_armed : 1;
		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* context_status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}

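/*
 * A rough sketch of how the completion side is expected to use the helpers
 * above when processing a context-status event (the real handler lives in
 * the execlists backend; locals and the comments are illustrative only):
 *
 *	struct execlist_port *port = execlists->port;
 *	struct drm_i915_gem_request *rq;
 *	unsigned int count;
 *
 *	rq = port_unpack(port, &count);
 *	if (--count == 0) {
 *		// last outstanding submission for this context completed
 *		i915_gem_request_put(rq);
 *		execlists_port_complete(execlists, port);
 *	} else {
 *		// a Lite Restore completed; keep the request in the port
 *		port_set(port, port_pack(rq, count));
 *	}
 */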
static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

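/*
 * The usual emission pattern, for reference (a sketch only; real callers
 * check for errors and emit meaningful commands rather than NOPs):
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *
 *	intel_ring_advance(req, cs);
 *
 * i.e. reserve space for n dwords, write exactly n dwords, then let
 * intel_ring_advance() sanity-check that the two agree.
 */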
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual
	 * RING_HEAD; it may have advanced since, but in the worst case it is
	 * equal to ring->head, so we must never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
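/*
 * As a quick check on the value above: the BDW worst case quoted in the
 * comment is (6 + 6 + 72) dwords == 84 dwords, i.e. 84 * sizeof(u32) == 336
 * bytes.
 */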

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

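/*
 * Sketch of how the helper above is typically chained when building up a
 * batch of workaround commands; the particular flags and the use of the
 * engine scratch page here are illustrative, not prescriptive:
 *
 *	batch = gen8_emit_pipe_control(batch,
 *				       PIPE_CONTROL_CS_STALL |
 *				       PIPE_CONTROL_QW_WRITE,
 *				       i915_ggtt_offset(engine->scratch) +
 *				       2 * CACHELINE_BYTES);
 *
 * Each call fills in one 6-dword PIPE_CONTROL and returns the position for
 * the next command.
 */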
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

#endif /* _INTEL_RINGBUFFER_H_ */