/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

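/*
 * The CTX_* values above are dword offsets into the per-engine logical ring
 * context image (see populate_lr_context() below): slot CTX_FOO holds the
 * MMIO offset of register FOO and slot CTX_FOO+1 holds the value to load into
 * it. As a short illustration of the pattern (taken from that same function):
 *
 *	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
 *	reg_state[CTX_RING_TAIL+1] = 0;
 */
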
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

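/*
 * A minimal usage sketch (an assumption about the caller, not code from this
 * file): driver init is expected to sanitize the module parameter exactly
 * once, before any ring setup, along these lines:
 *
 *	i915.enable_execlists =
 *		intel_sanitize_enable_execlists(dev, i915.enable_execlists);
 *	if (i915.enable_execlists)
 *		... take the Execlists (logical ring) init path ...
 */
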
static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
				 struct list_head *vmas)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(ringbuf);
}

int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning of this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
		return -EINVAL;
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(ringbuf, vmas);
	if (ret)
		return ret;

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_logical_ring_begin(ringbuf, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
	if (ret)
		return ret;

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

	return 0;
}

void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
		return;
	}
	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ringbuf->ring))
		return;

	/* TODO: how to submit a context to the ELSP is not here yet */
}

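/*
 * A rough sketch of the ELSP submission the TODO above refers to, purely as
 * an illustration (the descriptor layout and write sequence here are
 * assumptions, not something this file implements yet): build a 64-bit
 * context descriptor from the GGTT address of the context state object plus
 * a "valid" bit, then write both execlist elements to RING_ELSP, upper dword
 * before lower dword, second element before the first:
 *
 *	u64 desc = i915_gem_obj_ggtt_offset(ctx_obj) | 1;	   assumed valid bit
 *
 *	I915_WRITE(RING_ELSP(ring), 0);				   element 1, upper (unused)
 *	I915_WRITE(RING_ELSP(ring), 0);				   element 1, lower
 *	I915_WRITE(RING_ELSP(ring), upper_32_bits(desc));	   element 0, upper
 *	I915_WRITE(RING_ELSP(ring), lower_32_bits(desc));	   element 0, lower; triggers submission
 */
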
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
				    struct intel_context *ctx)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		/* Hold a reference to the context this request belongs to
		 * (we will need it when the time comes to emit/retire the
		 * request).
		 */
		request->ctx = ctx;
		i915_gem_context_reference(request->ctx);

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop;
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}

static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

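/*
 * The usual emission pattern, as used by gen8_emit_bb_start() and the other
 * emit functions below: reserve a known number of dwords, emit exactly that
 * many, then advance. For example, to emit two no-ops:
 *
 *	ret = intel_logical_ring_begin(ringbuf, 2);
 *	if (ret)
 *		return ret;
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_advance(ringbuf);
 */
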
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
			      u64 offset, unsigned flags)
{
	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	if (ring == &dev_priv->ring[VCS]) {
		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
				MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	} else {
		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}

void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!intel_ring_initialized(ring))
		return;

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	if (HAS_L3_DPF(dev))
		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

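/*
 * A sketch of how the caller is assumed to hook this up (the hook itself
 * lives outside this file and the exact names are assumptions): when the
 * sanitized enable_execlists parameter is set, GEM init points its ring
 * setup vfunc at intel_logical_rings_init() instead of the legacy
 * ringbuffer path:
 *
 *	if (i915.enable_execlists)
 *		dev_priv->gt.init_rings = intel_logical_rings_init;
 *	else
 *		dev_priv->gt.init_rings = i915_gem_init_rings;
 */
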
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
		_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->FIXME_lrc_ctx = ctx;

	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}