#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, so using 64 is
 * overkill but keeps the logic simple. Indeed, the whole purpose of this
 * macro is just to give some indication as to some of the magic values
 * used in the various workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW
 * to do the writes, and that command must have qword-aligned offsets, we
 * simply pretend the seqno is 8 bytes.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))
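
/*
 * Worked example (illustrative only): with I915_NUM_RINGS == 5 and a seqno
 * slot size of 8 bytes, VCS (id 1) signalling BCS (id 2) writes at
 *
 *	GEN8_SIGNAL_OFFSET(vcs, BCS) = base + (1 * 5 * 8) + (8 * 2) = base + 0x38
 *
 * and BCS waiting on VCS reads back from the very same slot:
 *
 *	GEN8_WAIT_OFFSET(bcs, VCS) = base + (1 * 5 * 8) + (8 * 2) = base + 0x38
 *
 * which matches the 0x38 entries in the signal/wait tables documented on
 * struct intel_engine_cs below.
 */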

#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)
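
/*
 * Usage sketch (assumed caller context, not a definition from this file):
 * the macro expects local variables named "dev_priv" and "ring" to be in
 * scope, so a gen8 engine init path would typically invoke it as
 *
 *	struct drm_i915_private *dev_priv = dev->dev_private;
 *	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
 *
 *	GEN8_RING_SEMAPHORE_INIT;
 *
 * after which every signal_ggtt[] slot except the ring's own holds a
 * valid GGTT address.
 */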

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	struct intel_engine_cs *ring;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	int reserved_size;
	int reserved_tail;
	bool reserved_in_use;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
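
/*
 * Sketch of the consumer side (illustrative; cf. intel_ring_update_space(),
 * declared below and defined in intel_ringbuffer.c): when free space is
 * recomputed, any pending retirement is folded into head first:
 *
 *	if (ringbuf->last_retired_head != -1) {
 *		ringbuf->head = ringbuf->last_retired_head;
 *		ringbuf->last_retired_head = -1;
 *	}
 *	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
 *					    ringbuf->tail, ringbuf->size);
 */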

struct intel_context;
struct drm_i915_reg_descriptor;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of DWORDs.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position, which is also helpful in
 * case we want to have multiple batches at different offsets based on some
 * criteria. It is not a requirement at the moment but provides an option
 * for future use.
 * size: size of the batch in DWORDs
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;
};
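
/*
 * Illustrative sketch (hypothetical local variables "start" and "index",
 * not defined in this file) of how an emitter records one batch's bounds
 * while filling the workaround page with dwords:
 *
 *	wa_ctx->indirect_ctx.offset = start;        (dwords from page start)
 *	wa_ctx->indirect_ctx.size = index - start;  (dwords emitted)
 *
 * The gen8 context-workaround setup code follows this pattern.
 */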

struct intel_engine_cs {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32 mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void (*irq_put)(struct intel_engine_cs *ring);

	int (*init_hw)(struct intel_engine_cs *ring);

	int (*init_context)(struct drm_i915_gem_request *req);

	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_engine_cs *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_engine_cs *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
				   u64 offset, u32 length,
				   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  i.e. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  i.e. transpose of f(x, y)
	 */
	struct {
		u32 sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32 wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				u32 signal[I915_NUM_RINGS];
			} mbox;
			u64 signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int (*sync_to)(struct intel_engine_cs *ring,
			       struct intel_engine_cs *to,
			       u32 seqno);
		int (*signal)(struct intel_engine_cs *signaller,
			      /* num_dwords needed by caller */
			      unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	spinlock_t execlist_lock;
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int (*emit_request)(struct intel_ringbuffer *ringbuf,
			    struct drm_i915_gem_request *request);
	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 invalidate_domains,
			  u32 flush_domains);
	int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
			     struct intel_context *ctx,
			     u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_descriptor *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers,
	 * but only from the DRM master.
	 */
	const struct drm_i915_reg_descriptor *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length
	 * field encoding for the command (i.e. certain opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
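
/*
 * Worked example (assuming the engines sit in one contiguous array indexed
 * by id, so the pointer difference equals the id difference): for
 * ring = VCS (id 1) and other = RCS (id 0),
 *
 *	idx = (0 - 1) - 1 = -2, then -2 + I915_NUM_RINGS = 3
 *
 * which matches the "vcs -> ... 3 = rcs" row in the comment above.
 */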

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}
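
/*
 * Typical use (illustrative): a get_seqno() implementation reads the
 * hardware-updated seqno dword out of the status page, e.g.
 *
 *	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * where I915_GEM_HWS_INDEX is the driver-reserved dword defined below.
 */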

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
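
/*
 * Note: assuming MI_STORE_DWORD_INDEX_SHIFT is 2 (dword index to byte
 * offset), I915_GEM_HWS_SCRATCH_ADDR works out to 0x40 << 2 = 0x100, i.e.
 * the byte offset of the scratch dword within the status page.
 */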

void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
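
/*
 * Canonical emit sequence (illustrative): reserve space, write dwords,
 * then publish the new tail. MI_NOOP stands in for real commands here.
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * intel_ring_begin() must be sized to cover every intel_ring_emit() that
 * follows, since intel_ring_emit() itself performs no bounds checking.
 */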
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
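
/*
 * A minimal sketch of the space calculation (the authoritative version
 * lives in intel_ringbuffer.c): free space is the distance from tail back
 * around to head, minus I915_RING_FREE_SPACE to honour the head/tail
 * cacheline restriction documented at the top of this file:
 *
 *	int space = head - tail;
 *	if (space <= 0)
 *		space += size;
 *	return space - I915_RING_FREE_SPACE;
 */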

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_request == NULL);
	return ring->outstanding_lazy_request;
}

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too
 * little, as that allows for corner cases that might have been missed. So
 * the figure has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST	160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
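
/*
 * Lifecycle sketch (illustrative): request creation reserves the space,
 * i915_add_request() consumes it, and an abandoned request cancels it:
 *
 *	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 *	... build and emit the request's commands ...
 *	intel_ring_reserved_space_use(ringbuf);      (in i915_add_request())
 *	... emit the breadcrumb/flush commands ...
 *	intel_ring_reserved_space_end(ringbuf);
 *
 * or, on failure before submission:
 *
 *	intel_ring_reserved_space_cancel(ringbuf);
 */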

#endif /* _INTEL_RINGBUFFER_H_ */