/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

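/*
 * Decide whether Execlists should be used: honour an explicit request via the
 * module parameter, otherwise only enable them where the hardware has logical
 * ring contexts and PPGTT is in use.
 */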
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev))
		return 1;

	return 0;
}

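/*
 * Emit a flush that invalidates the GPU caches, also flushing the GPU write
 * domains if a previous request left them dirty.
 */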
static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
				 struct list_head *vmas)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(ringbuf);
}

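/*
 * Execlists-specific execbuffer submission: validate the execbuffer flags,
 * move the objects into the GPU domain, switch the relative constants mode
 * on the render ring if needed, and finally emit the batchbuffer start.
 */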
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning of this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
		return -EINVAL;
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(ringbuf, vmas);
	if (ret)
		return ret;

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_logical_ring_begin(ringbuf, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
	if (ret)
		return ret;

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

	return 0;
}

void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
		return;
	}
	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}

void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ringbuf->ring))
		return;

	/* TODO: how to submit a context to the ELSP is not here yet */
}

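/* Reserve a lazy request and seqno before any commands are written to the ring. */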
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

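/*
 * Try to make space by retiring requests: find the first outstanding request
 * whose completion would free enough ring space, wait for its seqno and then
 * retire it.
 */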
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	/* TODO: make sure we update the right ringbuffer's last_retired_head
	 * when retiring requests */
	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

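/*
 * Wait until at least 'bytes' are free in the ringbuffer, first by retiring
 * requests and, failing that, by polling the ring HEAD until space appears,
 * the GPU is found wedged, or the wait times out.
 */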
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}

static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

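/*
 * Make room for 'num_dwords' dwords in the ringbuffer (wrapping or waiting
 * for space as required) and reserve the outstanding lazy seqno.
 */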
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

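/* Emit an MI_BATCH_BUFFER_START pointing at the given 64-bit batch offset. */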
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
			      u64 offset, unsigned flags)
{
	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	if (ring == &dev_priv->ring[VCS]) {
		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
				MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	} else {
		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

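/*
 * Write the request's seqno into the hardware status page and emit a user
 * interrupt so that waiters are woken once the request completes.
 */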
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}

void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!intel_ring_initialized(ring))
		return;

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

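/*
 * Common engine setup for Execlists: create the default logical ring context
 * for this engine, point the status page at its backing object and set up the
 * command parser before calling the engine-specific init hook.
 */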
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	if (HAS_L3_DPF(dev))
		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

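/*
 * Initialize all logical rings present on this device, unwinding any
 * already-initialized rings on failure.
 */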
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

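/*
 * Fill in the default register state for a newly created logical ring
 * context: ring head/tail/start/control, batchbuffer registers and the PPGTT
 * page directory pointers.
 */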
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
		_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

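/*
 * Lazily allocate the backing object and ringbuffer for a logical ring
 * context the first time it is needed on a given engine.
 */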
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}