/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)

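/* Offsets (in dwords) into the per-context register state page. Each CTX_*
 * index below points either at an MI_LOAD_REGISTER_IMM header or at the
 * register slot that populate_lr_context() fills in further down this file.
 */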
#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

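/* Decide whether to use Execlists for command submission. The value of the
 * i915.enable_execlists module parameter is passed in: 0 forces the legacy
 * ringbuffer path, while any other value enables Execlists provided the
 * hardware has Logical Ring Contexts and PPGTT is in use.
 */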
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev))
		return 1;

	return 0;
}

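/* Execlists counterpart of the execbuffer submission path. The actual
 * batchbuffer dispatch is still a TODO here, as is stopping a logical ring
 * in the function below.
 */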
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	/* TODO */
	return 0;
}

void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	/* TODO */
}

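/* Bump the ringbuffer tail; telling the hardware about it (writing the new
 * tail into the context and submitting the context to the ELSP) is still a
 * TODO below.
 */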
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ringbuf->ring))
		return;

	/* TODO: how to submit a context to the ELSP is not here yet */
}

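/* Lazily allocate the request and seqno that will be attached to whatever
 * work is emitted next on this engine (the "outstanding lazy request").
 */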
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

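/* Try to make room by retiring requests: find the earliest request whose
 * retirement would free at least @bytes of ringbuffer space, wait on its
 * seqno, then retire everything up to it.
 */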
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	/* TODO: make sure we update the right ringbuffer's last_retired_head
	 * when retiring requests */
	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

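/* Wait for @bytes of ringbuffer space. First try to free space by retiring
 * requests; if that is not enough, force submission of whatever is pending
 * and poll the hardware head until space opens up, we time out, a signal
 * arrives or the GPU is declared wedged.
 */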
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}

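/* Not enough room left at the end of the ringbuffer: pad the remainder with
 * MI_NOOPs and wrap the tail back to the start.
 */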
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

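/* Make sure there are @bytes of free, contiguous space at the tail, wrapping
 * first if the emission would not fit before the effective end of the buffer.
 */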
static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

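/* Reserve space for @num_dwords dwords in this logical ring's ringbuffer,
 * checking for a wedged GPU and allocating the lazy request before any
 * commands are actually emitted.
 */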
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

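/* Common GEN8 engine setup: put the engine into Execlist mode by disabling
 * replay mode and setting the "run list enable" bit in the RING_MODE
 * register, then reset the hangcheck state.
 */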
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

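/* Emit the commands that conclude a request: store the seqno into the
 * hardware status page (via a global GTT write) and raise a user interrupt.
 */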
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}

void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	/* TODO: make sure the ring is stopped */
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

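/* Common initialization for a logical ring: set up the engine lists and IRQ
 * wait queue, create the default context (whose backing object also provides
 * the status page, at offset 0), and bring up the command parser before
 * running any engine-specific init hook.
 */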
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;

	return logical_ring_init(dev, ring);
}

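/* Build every logical ring the hardware actually has, mirroring the legacy
 * ring-init path, and seed the driver-wide seqno close to the wrap point
 * (presumably so that seqno wraparound handling gets exercised early).
 */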
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

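/* Fill in the register state page of a logical ring context: the
 * MI_LOAD_REGISTER_IMM headers plus (register, value) pairs covering ring
 * head/tail/start/control, batchbuffer state, the PDPs for the PPGTT and,
 * for the render ring only, the indirect/per-context batchbuffer pointers
 * and the power/clock state register.
 */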
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
		_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

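/* Release every per-engine backing object and ringbuffer that was created
 * for this context by intel_lr_context_deferred_create().
 */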
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

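/* Allocate, pin and populate the backing object and ringbuffer for a given
 * context/engine pair. "Deferred" because it is intended to run the first
 * time a context is used on an engine rather than at context-creation time;
 * it returns early if the state already exists.
 */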
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}