/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based method).
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
#define GEN8_CTX_MODE_SHIFT 3
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32

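/*
 * Decide whether Execlists submission should be used, based on the
 * i915.enable_execlists module parameter and on hardware/PPGTT support.
 * Returns 1 to enable Execlists, 0 to stay on the legacy ringbuffer path.
 */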
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

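/*
 * Return the 20-bit globally unique context ID used by the hardware for
 * this context object (the LRCA with its 4K-alignment bits dropped).
 */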
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
{
	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);

	/* LRCA is required to be 4K aligned so the more significant 20 bits
	 * are globally unique */
	return lrca >> 12;
}

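/*
 * Build the 64-bit context descriptor that is written to the ELSP: the
 * context's GGTT address (LRCA) combined with the valid, addressing-mode,
 * L3LLC-coherency and privilege bits, plus the context ID in the upper dword.
 */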
static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
{
	uint64_t desc;
	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
	BUG_ON(lrca & 0xFFFFFFFF00000FFFULL);

	desc = GEN8_CTX_VALID;
	desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
	desc |= GEN8_CTX_L3LLC_COHERENT;
	desc |= GEN8_CTX_PRIVILEGE;
	desc |= lrca;
	desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* desc |= GEN8_CTX_FORCE_RESTORE; */

	return desc;
}

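/*
 * Write two context descriptors to the Execlist Submit Port. The second
 * context (ctx_obj1) may be NULL, in which case only one context is
 * submitted; the hardware loads the contexts once the last dword lands.
 */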
static void execlists_elsp_write(struct intel_engine_cs *ring,
				 struct drm_i915_gem_object *ctx_obj0,
				 struct drm_i915_gem_object *ctx_obj1)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	uint64_t temp = 0;
	uint32_t desc[4];

	/* XXX: You must always write both descriptors in the order below. */
	if (ctx_obj1)
		temp = execlists_ctx_descriptor(ctx_obj1);
	else
		temp = 0;
	desc[1] = (u32)(temp >> 32);
	desc[0] = (u32)temp;

	temp = execlists_ctx_descriptor(ctx_obj0);
	desc[3] = (u32)(temp >> 32);
	desc[2] = (u32)temp;

	/* Set Force Wakeup bit to prevent GT from entering C6 while
	 * ELSP writes are in progress */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(RING_ELSP(ring), desc[1]);
	I915_WRITE(RING_ELSP(ring), desc[0]);
	I915_WRITE(RING_ELSP(ring), desc[3]);
	/* The context is automatically loaded after the following */
	I915_WRITE(RING_ELSP(ring), desc[2]);

	/* ELSP is a wo register, so use another nearby reg for posting instead */
	POSTING_READ(RING_EXECLIST_STATUS(ring));

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

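/*
 * Submit one or two contexts for execution by writing their descriptors
 * to the ELSP. Both context objects must already be pinned in the GGTT.
 */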
static int execlists_submit_context(struct intel_engine_cs *ring,
				    struct intel_context *to0, u32 tail0,
				    struct intel_context *to1, u32 tail1)
{
	struct drm_i915_gem_object *ctx_obj0;
	struct drm_i915_gem_object *ctx_obj1 = NULL;

	ctx_obj0 = to0->engine[ring->id].state;
	BUG_ON(!ctx_obj0);
	BUG_ON(!i915_gem_obj_is_pinned(ctx_obj0));

	if (to1) {
		ctx_obj1 = to1->engine[ring->id].state;
		BUG_ON(!ctx_obj1);
		BUG_ON(!i915_gem_obj_is_pinned(ctx_obj1));
	}

	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);

	return 0;
}

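/*
 * Emit a flush that invalidates the GPU caches (and flushes any dirty ones)
 * before new commands are executed on this logical ring.
 */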
static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

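/*
 * Prepare the objects referenced by an execbuffer call: synchronise each VMA
 * with the target ring, flush CPU writes where needed, and finish with a
 * full GPU cache invalidation before the batch runs.
 */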
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
				 struct list_head *vmas)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(ringbuf);
}

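/*
 * Execlists counterpart of the execbuffer submission path: validate the
 * execbuffer flags, move the objects to the GPU domain, update INSTPM on the
 * render ring if the relative-constants mode changed, and emit the batch
 * buffer start for the given context.
 */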
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning of this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
		return -EINVAL;
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(ringbuf, vmas);
	if (ret)
		return ret;

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_logical_ring_begin(ringbuf, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
	if (ret)
		return ret;

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

	return 0;
}

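/*
 * Wait for the logical ring to go idle and then ask the hardware to stop it
 * via the STOP_RING bit in RING_MI_MODE.
 */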
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
		return;
	}
	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}

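/*
 * Flush any dirty GPU caches on this logical ring; a no-op when nothing is
 * marked dirty.
 */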
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

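/*
 * Bump the ringbuffer tail and, unless the ring has been stopped, submit the
 * context to the ELSP so the hardware starts executing the new commands.
 */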
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;

	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ring))
		return;

	/* FIXME: too cheeky, we don't even check if the ELSP is ready */
	execlists_submit_context(ring, ctx, ringbuf->tail, NULL, 0);
}

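/*
 * Make sure a lazy request/seqno pair exists for this ring, allocating one
 * (and taking a reference on the context it belongs to) if necessary.
 */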
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
				    struct intel_context *ctx)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		/* Hold a reference to the context this request belongs to
		 * (we will need it when the time comes to emit/retire the
		 * request).
		 */
		request->ctx = ctx;
		i915_gem_context_reference(request->ctx);

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

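/*
 * Try to free ring space by waiting for the oldest request whose completion
 * would leave at least 'bytes' of room in the ringbuffer, then retire it.
 * Returns -ENOSPC if no such request exists yet.
 */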
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

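/*
 * Wait until at least 'bytes' of ringbuffer space are available: first by
 * retiring old requests, and failing that by force-submitting the pending
 * work and polling the ring head until enough space frees up (or we hit a
 * timeout, a signal or a GPU wedge).
 */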
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}

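/*
 * Fill the rest of the ringbuffer with MI_NOOPs and wrap the tail back to
 * the start, waiting for space first if the remaining bytes are not free.
 */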
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

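/*
 * Make room for 'bytes' of new commands: wrap the buffer if the request
 * would run past the effective size, then wait for free space if needed.
 */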
static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

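/*
 * Reserve space for 'num_dwords' commands in the logical ringbuffer: check
 * that the GPU is not wedged, make room in the buffer and preallocate the
 * outstanding lazy request before any dwords are emitted.
 */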
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

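/*
 * Common gen8 ring initialisation for Execlists: program the interrupt
 * masks, clear RING_HWSTAM and enable Execlist mode in RING_MODE_GEN7.
 */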
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

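/*
 * Emit an MI_BATCH_BUFFER_START for gen8, selecting PPGTT addressing unless
 * a secure dispatch was requested.
 */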
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
			      u64 offset, unsigned flags)
{
	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

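/*
 * Emit an MI_FLUSH_DW for the non-render rings, adding TLB invalidation
 * bits when caches must be invalidated (plus BSD invalidation on the
 * video ring).
 */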
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	if (ring == &dev_priv->ring[VCS]) {
		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
				MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	} else {
		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}

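/*
 * Emit a PIPE_CONTROL flush for the render ring, translating the requested
 * flush/invalidate domains into the corresponding cache-flush and
 * cache-invalidate bits.
 */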
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

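/*
 * Emit the commands that complete a request: store the outstanding lazy
 * seqno into the hardware status page, raise a user interrupt, and submit
 * the ring contents to the ELSP.
 */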
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}

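/*
 * Tear down a logical ring: stop it, drop any pending lazy request, run the
 * engine-specific cleanup hook and release the command parser and status
 * page mappings.
 */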
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!intel_ring_initialized(ring))
		return;

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

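/*
 * Common initialisation for a logical ring: set up the engine lists, create
 * the default context (whose first page doubles as the hardware status
 * page), initialise the command parser and call the engine init hook.
 */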
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

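/*
 * The logical_*_ring_init() helpers below fill in the per-engine names,
 * MMIO bases, interrupt masks and Execlists vfuncs, then hand over to
 * logical_ring_init() for the common setup.
 */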
static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	if (HAS_L3_DPF(dev))
		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

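/*
 * Initialise all logical rings present on this device, tearing down any
 * already-initialised ones if a later engine fails.
 */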
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

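/*
 * Fill in the register state page of a newly allocated logical ring context:
 * the MI_LOAD_REGISTER_IMM headers plus the ring buffer, batch buffer and
 * PPGTT page-directory registers needed for the first context restore.
 */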
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
			_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

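/*
 * Release the per-engine state of a logical ring context: destroy each
 * ringbuffer, unpin the context object and drop its reference.
 */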
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

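/*
 * Allocate and pin the backing object and ringbuffer for a logical ring
 * context the first time it is needed on a given engine, then populate the
 * register state so the context is ready for its first submission.
 */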
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->FIXME_lrc_ctx = ctx;

	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}