/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

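/*
 * Sanitize the "enable_execlists" module parameter: a value of 0 explicitly
 * disables Execlists, and they are only enabled when the hardware supports
 * logical ring contexts and PPGTT is in use.
 */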
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev))
		return 1;

	return 0;
}

int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	/* TODO */
	return 0;
}

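/*
 * Wait for the engine to go idle, then set the STOP_RING bit and poll for
 * MODE_IDLE before clearing it again, so the ring is stopped cleanly.
 */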
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
		return;
	}
	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}

void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ringbuf->ring))
		return;

	/* TODO: how to submit a context to the ELSP is not here yet */
}

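/*
 * Make sure there is an outstanding lazy seqno (and a preallocated request
 * to go with it) before any commands are emitted to the ringbuffer.
 */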
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

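/*
 * Try to make room in the ringbuffer by waiting for the oldest request whose
 * retirement would free at least "bytes" of space; returns -ENOSPC if no
 * such request exists yet.
 */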
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	/* TODO: make sure we update the right ringbuffer's last_retired_head
	 * when retiring requests */
	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}

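/*
 * Fill the space between the current tail and the end of the ringbuffer with
 * MI_NOOPs and wrap the tail back to the start, waiting for space first if
 * the remainder does not fit.
 */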
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

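/*
 * Reserve space for "num_dwords" dwords in the ringbuffer attached to a
 * logical ring context, checking for a wedged GPU and preallocating the
 * lazy request before the ring is touched.
 */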
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

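/*
 * Emit an MI_BATCH_BUFFER_START for gen8. The PPGTT address-space bit is set
 * unless a secure dispatch was requested (see the FIXME about the selectors
 * below).
 */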
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
			      u64 offset, unsigned flags)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

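/*
 * Flush implementation for the non-render rings: an MI_FLUSH_DW, with TLB
 * invalidation added when the caller asks for the relevant domains to be
 * invalidated.
 */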
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	if (ring == &dev_priv->ring[VCS]) {
		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
				MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	} else {
		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

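/*
 * Emit the commands that complete a request: store the outstanding lazy
 * seqno into the hardware status page, follow it with a user interrupt, and
 * then advance and submit the ringbuffer.
 */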
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}

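/*
 * Tear down a logical ring: stop it, drop the lazy request/seqno, run the
 * engine-specific cleanup hook, and release the command parser and status
 * page mappings.
 */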
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!intel_ring_initialized(ring))
		return;

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	if (HAS_L3_DPF(dev))
		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

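/*
 * Initialize every logical ring present on the device (render plus whichever
 * of BSD, BLT, VEBOX and BSD2 the hardware has), unwinding the rings already
 * set up if a later one fails.
 */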
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

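/*
 * Fill in the register state area of a newly allocated logical ring context:
 * the MI_LOAD_REGISTER_IMM headers, ring head/tail/start/control, batch
 * buffer registers, the PDPs for the PPGTT and (render ring only) the
 * power/clock state register.
 */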
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
			_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

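/*
 * Release the per-engine state of a logical ring context: the ringbuffer and
 * its backing object, plus the pinned context object itself.
 */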
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

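/*
 * Create the backing object and ringbuffer for a logical ring context the
 * first time the context is used on a given engine; subsequent calls for
 * the same engine are a no-op.
 */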
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}