/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */
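
/*
 * Each logical ring context backs one context/engine pair with its own
 * state, including a dedicated ringbuffer: rather than all contexts
 * sharing a single global ring per engine, each context gets its own.
 * Work is submitted by writing up to two context descriptors into the
 * engine's ExecList Submission Port (ELSP); completion and context
 * switch events come back through the Context Status Buffer (CSB),
 * serviced from the context switch interrupt handler
 * (intel_execlists_handle_ctx_events below).
 */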

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
#define GEN8_CTX_MODE_SHIFT 3
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
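
/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev: DRM device.
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and PPGTT).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */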
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}
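
/**
 * intel_execlists_ctx_id() - get the Execlists Context ID
 * @ctx_obj: Logical Ring Context backing object.
 *
 * Do not confuse with ctx->id! Unfortunately we have a name overload
 * here: the old context ID we pass to userspace as a handle so that
 * they can refer to a context, and the new context ID we pass to the
 * ELSP so that the GPU can inform us of the context status via
 * interrupts.
 *
 * Return: 20-bit globally unique context ID.
 */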
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
{
	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);

	/* LRCA is required to be 4K aligned so the more significant 20 bits
	 * are globally unique */
	return lrca >> 12;
}

static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
{
	uint64_t desc;
	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);

	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);

	desc = GEN8_CTX_VALID;
	desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
	desc |= GEN8_CTX_L3LLC_COHERENT;
	desc |= GEN8_CTX_PRIVILEGE;
	desc |= lrca;
	desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* desc |= GEN8_CTX_FORCE_RESTORE; */

	return desc;
}
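
/*
 * Submit (up to) two contexts to the ExecList Submission Port: four 32-bit
 * writes, upper then lower dword of the second descriptor first, then upper
 * and lower dwords of the first. Execution begins once the last dword lands.
 */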
static void execlists_elsp_write(struct intel_engine_cs *ring,
				 struct drm_i915_gem_object *ctx_obj0,
				 struct drm_i915_gem_object *ctx_obj1)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	uint64_t temp = 0;
	uint32_t desc[4];
	unsigned long flags;

	/* XXX: You must always write both descriptors in the order below. */
	if (ctx_obj1)
		temp = execlists_ctx_descriptor(ctx_obj1);
	else
		temp = 0;
	desc[1] = (u32)(temp >> 32);
	desc[0] = (u32)temp;

	temp = execlists_ctx_descriptor(ctx_obj0);
	desc[3] = (u32)(temp >> 32);
	desc[2] = (u32)temp;

	/* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
	 * are in progress.
	 *
	 * The other problem is that we can't just call gen6_gt_force_wake_get()
	 * because that function calls intel_runtime_pm_get(), which might sleep.
	 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

	I915_WRITE(RING_ELSP(ring), desc[1]);
	I915_WRITE(RING_ELSP(ring), desc[0]);
	I915_WRITE(RING_ELSP(ring), desc[3]);
	/* The context is automatically loaded after the following */
	I915_WRITE(RING_ELSP(ring), desc[2]);

	/* ELSP is a write-only register, so use another nearby reg for posting instead */
	POSTING_READ(RING_EXECLIST_STATUS(ring));

	/* Release Force Wakeup (see the big comment above). */
	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}
static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
{
	struct page *page;
	uint32_t *reg_state;

	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	reg_state[CTX_RING_TAIL+1] = tail;

	kunmap_atomic(reg_state);

	return 0;
}
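
/*
 * Write the new tails into the context images (the second context being
 * optional) and submit both descriptors to the ELSP.
 */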
static int execlists_submit_context(struct intel_engine_cs *ring,
				    struct intel_context *to0, u32 tail0,
				    struct intel_context *to1, u32 tail1)
{
	struct drm_i915_gem_object *ctx_obj0;
	struct drm_i915_gem_object *ctx_obj1 = NULL;

	ctx_obj0 = to0->engine[ring->id].state;
	BUG_ON(!ctx_obj0);
	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));

	execlists_ctx_write_tail(ctx_obj0, tail0);

	if (to1) {
		ctx_obj1 = to1->engine[ring->id].state;
		BUG_ON(!ctx_obj1);
		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));

		execlists_ctx_write_tail(ctx_obj1, tail1);
	}

	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);

	return 0;
}
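
/*
 * Take the request at the head of the execlist queue (plus, if available, a
 * second request for a different context) and submit both to the ELSP.
 * Consecutive requests for the same context are coalesced: only the most
 * recent tail needs to be executed, so the earlier requests can be retired
 * right away.
 */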
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
	struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
	struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	assert_spin_locked(&ring->execlist_lock);

	if (list_empty(&ring->execlist_queue))
		return;

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			queue_work(dev_priv->wq, &req0->work);
			req0 = cursor;
		} else {
			req1 = cursor;
			break;
		}
	}

	WARN_ON(req1 && req1->elsp_submitted);

	WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
					 req1 ? req1->ctx : NULL,
					 req1 ? req1->tail : 0));

	req0->elsp_submitted++;
	if (req1)
		req1->elsp_submitted++;
}
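
/*
 * Check whether the Context Status Buffer event identified by request_id
 * refers to the request at the head of the execlist queue, and if so account
 * for one completed ELSP submission. The request is only removed (and its
 * cleanup work queued) once all of its ELSP submissions have completed.
 */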
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
					   u32 request_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ctx_submit_request *head_req;

	assert_spin_locked(&ring->execlist_lock);

	head_req = list_first_entry_or_null(&ring->execlist_queue,
					    struct intel_ctx_submit_request,
					    execlist_link);

	if (head_req != NULL) {
		struct drm_i915_gem_object *ctx_obj =
				head_req->ctx->engine[ring->id].state;
		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
			WARN(head_req->elsp_submitted == 0,
			     "Never submitted head request\n");

			if (--head_req->elsp_submitted <= 0) {
				list_del(&head_req->execlist_link);
				queue_work(dev_priv->wq, &head_req->work);
				return true;
			}
		}
	}

	return false;
}
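
/**
 * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
 * @ring: Engine Command Streamer to handle.
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */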
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 status_id;
	u32 submit_contexts = 0;

	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));

	read_pointer = ring->next_context_status_buffer;
	write_pointer = status_pointer & 0x07;
	if (read_pointer > write_pointer)
		write_pointer += 6;

	spin_lock(&ring->execlist_lock);

	while (read_pointer < write_pointer) {
		read_pointer++;
		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
				(read_pointer % 6) * 8);
		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
				(read_pointer % 6) * 8 + 4);

		if (status & GEN8_CTX_STATUS_PREEMPTED) {
			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(ring, status_id))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
		    (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
			if (execlists_check_remove_request(ring, status_id))
				submit_contexts++;
		}
	}

	if (submit_contexts != 0)
		execlists_context_unqueue(ring);

	spin_unlock(&ring->execlist_lock);

	WARN(submit_contexts > 2, "More than two context complete events?\n");
	ring->next_context_status_buffer = write_pointer % 6;

	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
}

static void execlists_free_request_task(struct work_struct *work)
{
	struct intel_ctx_submit_request *req =
		container_of(work, struct intel_ctx_submit_request, work);
	struct drm_device *dev = req->ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_runtime_pm_put(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_unreference(req->ctx);
	mutex_unlock(&dev->struct_mutex);

	kfree(req);
}
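
/*
 * Queue a request for execution: take a reference on its context, add it to
 * the tail of the engine's execlist queue and, if the ELSP was idle (the
 * queue was empty), submit it right away. If the two most recent requests in
 * the queue belong to the same context, the older one is dropped in favour
 * of the new tail.
 */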
static int execlists_context_queue(struct intel_engine_cs *ring,
				   struct intel_context *to,
				   u32 tail)
{
	struct intel_ctx_submit_request *req = NULL, *cursor;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	unsigned long flags;
	int num_elements = 0;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;
	req->ctx = to;
	i915_gem_context_reference(req->ctx);
	req->ring = ring;
	req->tail = tail;
	INIT_WORK(&req->work, execlists_free_request_task);

	intel_runtime_pm_get(dev_priv);

	spin_lock_irqsave(&ring->execlist_lock, flags);

	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

	if (num_elements > 2) {
		struct intel_ctx_submit_request *tail_req;

		tail_req = list_last_entry(&ring->execlist_queue,
					   struct intel_ctx_submit_request,
					   execlist_link);

		if (to == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
			     "More than 2 already-submitted reqs queued\n");
			list_del(&tail_req->execlist_link);
			queue_work(dev_priv->wq, &tail_req->work);
		}
	}

	list_add_tail(&req->execlist_link, &ring->execlist_queue);
	if (num_elements == 0)
		execlists_context_unqueue(ring);

	spin_unlock_irqrestore(&ring->execlist_lock, flags);

	return 0;
}

static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
				 struct list_head *vmas)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(ringbuf);
}
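
/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @dev: DRM device.
 * @file: DRM file.
 * @ring: Engine Command Streamer to submit to.
 * @ctx: Context to employ for this submission.
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 * @batch_obj: the batchbuffer to submit.
 * @exec_start: batchbuffer start virtual address pointer.
 * @flags: translated execbuffer call flags.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It
 * abstracts away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */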
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning of this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
		return -EINVAL;
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(ringbuf, vmas);
	if (ret)
		return ret;

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_logical_ring_begin(ringbuf, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
	if (ret)
		return ret;

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

	return 0;
}
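
/**
 * intel_logical_ring_stop() - stop the Engine Command Streamer
 * @ring: Engine Command Streamer to stop.
 *
 * Wait for the ring to go idle, then ask the hardware to stop the Command
 * Streamer and wait until it reports MODE_IDLE.
 */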
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
		return;
	}
	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}
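
/**
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @ringbuf: Logical Ringbuffer to advance.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual
 * context. What really happens during submission is that the context and
 * current tail will be placed on a queue waiting for the ELSP to be ready to
 * accept a new context submission. At that point, the tail *inside* the
 * context is updated and the ELSP written to.
 */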
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;

	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ring))
		return;

	execlists_context_queue(ring, ctx, ringbuf->tail);
}

static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
				    struct intel_context *ctx)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		/* Hold a reference to the context this request belongs to
		 * (we will need it when the time comes to emit/retire the
		 * request).
		 */
		request->ctx = ctx;
		i915_gem_context_reference(request->ctx);

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}

static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
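
/**
 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
 * @ringbuf: Logical ringbuffer.
 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
 *
 * The ringbuffer might not be ready to accept the commands right away (maybe
 * it needs to be wrapped, or wait a bit for the tail to be updated). This
 * function takes care of that and also preallocates a request (every workload
 * submission is still mediated through requests, same as it did with legacy
 * ringbuffer submission).
 *
 * Return: non-zero if the ringbuffer is not ready to be written to.
 */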
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
			      u64 offset, unsigned flags)
{
	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	if (ring == &dev_priv->ring[VCS]) {
		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
				MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	} else {
		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}
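
/**
 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
 * @ring: Engine Command Streamer.
 *
 * Stops the ring, drops the pending lazy request and releases the status
 * page mapping taken in logical_ring_init().
 */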
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!intel_ring_initialized(ring))
		return;

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	INIT_LIST_HEAD(&ring->execlist_queue);
	spin_lock_init(&ring->execlist_lock);
	ring->next_context_status_buffer = 0;

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	if (HAS_L3_DPF(dev))
		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}
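
/**
 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
 * This function inits the engines for an Execlists submission style (the
 * equivalent in the legacy ringbuffer submission world would be
 * i915_gem_init_rings). It does it only for those engines that are present
 * in the hardware.
 *
 * Return: non-zero if the initialization failed.
 */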
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}
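
/*
 * Fill in the register state area of a newly-created Logical Ring Context:
 * the MI_LOAD_REGISTER_IMM headers plus the (reg, value) pairs that the
 * hardware will load on the first context restore, including the ringbuffer
 * location and the PPGTT page directory pointers.
 */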
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/*
	 * The second page of the context object contains some fields which
	 * must be set up prior to the first execution.
	 */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/*
	 * A context is actually a big batch buffer with several
	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
	 * values we are setting here are only for the first context restore:
	 * on a subsequent save, the GPU will recreate this batchbuffer with
	 * new values (including all the missing MI_LOAD_REGISTER_IMM commands
	 * that we are not initializing here).
	 */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
		_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/*
		 * TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification.
		 */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
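	/*
	 * The PDP registers point the GPU at the PPGTT page directories;
	 * each 64-bit DMA address is split across an upper/lower dword pair.
	 */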
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
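	/* RCS only: render power/clock state (0x20c8), left zeroed for now. */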
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

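/**
 * intel_lr_context_free() - free the LRC specific bits of a context
 * @ctx: the LR context to free.
 *
 * The real context freeing is done in i915_gem_context_free: this only
 * takes care of the bits that are LRC related: the per-engine backing
 * objects and the logical ringbuffer.
 */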
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

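/* Gen8 logical ring contexts have a fixed size that depends on the engine. */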
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	uint32_t ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

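/**
 * intel_lr_context_deferred_create() - create the LRC specific bits of a context
 * @ctx: LR context to create.
 * @ring: engine to be used with the context.
 *
 * This function can be called more than once, with different engines, if we plan
 * to use the context with them. The context backing objects and the ringbuffers
 * (specially the ringbuffer backing objects) suck a lot of memory up, and that's
 * why the creation is a deferred call: it's better to make sure first that we
 * need to use a given ring with the context.
 *
 * Return: non-zero on error.
 */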
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		return -ENOMEM;
	}

	ringbuf->ring = ring;
	ringbuf->FIXME_lrc_ctx = ctx;

	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/*
	 * TODO: For now we put this in the mappable region so that we can
	 * reuse the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}