/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)

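/*
 * A logical ring context image is a list of MI_LOAD_REGISTER_IMM headers
 * followed by (reg, value) pairs. The CTX_* values below are the dword
 * offsets of the interesting entries within the context image; they are used
 * to index into the reg_state array in populate_lr_context().
 */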
#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

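/*
 * Sanitize the i915.enable_execlists module parameter: Execlists can only be
 * used when the hardware has logical ring contexts and some form of PPGTT is
 * in use, and only if the user did not explicitly disable them.
 *
 * Returns 1 if Execlists should be enabled, 0 otherwise.
 */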
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev))
		return 1;

	return 0;
}

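/*
 * Execlists counterpart of the legacy execbuffer submission path; still a
 * stub at this point (see the TODO below), as is intel_logical_ring_stop().
 */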
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	/* TODO */
	return 0;
}

void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	/* TODO */
}

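/*
 * Common per-engine hardware init for Execlists: disable GFX_REPLAY_MODE and
 * enable run-list (Execlist) submission through the ring mode register, then
 * reset the engine's hangcheck state.
 */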
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

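/* The seqno is still read from and written to the Hardware Status Page. */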
static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	/* TODO: make sure the ring is stopped */
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

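/*
 * Engine-agnostic part of logical ring setup: initialize the engine's lists
 * and irq waitqueue, create the default context for this engine, map the
 * status page (which lives at offset 0 of the default context's backing
 * object), then run the command parser init and the engine init hook.
 */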
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

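/*
 * The logical_*_ring_init() functions below fill in the engine-specific
 * names, MMIO bases, interrupt masks and vfuncs before handing over to the
 * common logical_ring_init().
 */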
static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;

	return logical_ring_init(dev, ring);
}

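/*
 * Initialize the logical rings for every engine present on this GPU,
 * unwinding the already-initialized ones if any step fails.
 */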
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

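/*
 * Fill in the register state page of a newly allocated logical ring context:
 * the MI_LOAD_REGISTER_IMM headers, the ring buffer head/tail/start/control
 * entries pointing at this context's ringbuffer object, and the PDP entries
 * of the context's PPGTT.
 */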
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
		_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

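/*
 * Free the Execlists-specific parts of a context: the per-engine ringbuffers
 * and the pinned logical ring context backing objects.
 */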
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

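/*
 * Size of the backing object needed for an engine's logical ring context;
 * only the render engine needs the larger GEN8_LR_CONTEXT_RENDER_SIZE.
 * Only gen8 sizes are known here.
 */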
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

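/*
 * Create the Execlists-specific parts of a context for the given engine:
 * allocate and pin the context backing object, allocate its ringbuffer and
 * populate the initial register state. The creation is deferred: it is meant
 * to be called the first time a context is about to be used with a given
 * engine (it returns early if the engine state already exists).
 */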
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}