#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
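/* i.e. 16 dwords; handy when padding command sequences to a cacheline boundary */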

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
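
/*
 * Worked example (illustrative): with I915_NUM_ENGINES == 5 and 8-byte
 * slots, GEN8_SEMAPHORE_OFFSET(0, 1) == (0 * 5 + 1) * 8 == 0x08, and the
 * last slot, GEN8_SEMAPHORE_OFFSET(4, 4), lands at (4 * 5 + 4) * 8 == 0xc0.
 * These match the corner cells of the gen8 signal/wait tables documented
 * inside struct intel_engine_cs below, which number the engines in table
 * order rather than enum intel_engine_id order.
 */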

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

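/*
 * Illustrative use of the iterator above (a sketch, not a verbatim
 * caller; consumers of intel_engine_get_instdone() walk the
 * per-slice/subslice INSTDONE registers this way):
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG("SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
 *			  slice, subslice,
 *			  instdone.sampler[slice][subslice]);
 */
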
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

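/*
 * How last_retired_head is consumed, a sketch of the update performed
 * by intel_ring_update_space() (see intel_ringbuffer.c for the
 * authoritative version):
 *
 *	if (ring->last_retired_head != -1) {
 *		ring->head = ring->last_retired_head;
 *		ring->last_retired_head = -1;
 *	}
 *	ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
 */
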
struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load the ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful if we
 *    want to have multiple batches at different offsets based on some
 *    criteria. It is not a requirement at the moment but provides an
 *    option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests; irqsafe */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	int (*context_pin)(struct intel_engine_cs *engine,
			   struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *out);
	int emit_breadcrumb_sz;
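	/* emit_breadcrumb() is expected to write exactly emit_breadcrumb_sz
	 * dwords into the out array; declaring the size up front lets the
	 * ring space be reserved when the request is constructed.
	 */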

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	bool preempt_wa;
	u32 ctx_desc_template;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
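	 *
	 * For example (illustrative): for the MI command client, where
	 * bits 5:0 of the header encode the packet length in dwords, an
	 * implementation would typically return 0x3f.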
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
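	/* The clflush is bracketed by two full barriers: the first makes
	 * sure any prior write to the status page dword is globally
	 * visible before the cacheline is evicted, the second makes sure
	 * the eviction has completed before a subsequent read.
	 */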
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
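
/*
 * For reference: MI_STORE_DWORD_INDEX_SHIFT converts a dword index into
 * a byte offset within the status page (a shift of 2), so
 * I915_GEM_HWS_INDEX_ADDR is 0x30 << 2 == 0xc0 and
 * I915_GEM_HWS_SCRATCH_ADDR is 0x40 << 2 == 0x100.
 */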

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}

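/*
 * Canonical emission pattern (a sketch; real callers live throughout
 * the driver):
 *
 *	struct intel_ring *ring = req->ring;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * The dword count passed to intel_ring_begin() must match the number of
 * intel_ring_emit() calls that follow.
 */
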
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - ring->vaddr;
	return intel_ring_wrap(ring, offset);
}

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
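
/*
 * A sketch of the space calculation (see __intel_ring_space() in
 * intel_ringbuffer.c for the authoritative version): the free space
 * between tail and head, less I915_RING_FREE_SPACE:
 *
 *	int space = head - tail;
 *	if (space <= 0)
 *		space += size;
 *	return space - I915_RING_FREE_SPACE;
 */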

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->last_submitted_seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
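
/*
 * Sketch of a waiter using the API above (illustrative; the real
 * consumer is the request wait path in i915_gem_request.c). A true
 * return from intel_engine_add_wait() indicates we are now the first
 * waiter, i.e. the bottom-half responsible for the coherent seqno
 * checks:
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	if (intel_engine_add_wait(engine, &wait))
 *		... sleep and recheck intel_engine_get_seqno(engine)
 *	...
 *	intel_engine_remove_wait(engine, &wait);
 */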

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */