/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences from the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use it. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
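
/*
 * As an illustrative sketch only (not driver code; the real logic lives in
 * execlists_context_unqueue() below), the pairing rule described above
 * amounts to:
 *
 *	req0 = first request on the engine's execlist_queue;
 *	while (the next request belongs to the same context as req0)
 *		drop req0 and let the newer request take its place;
 *	req1 = the following request, or NULL if the queue is exhausted;
 *	write the descriptors of {req1, req0} to the ELSP;
 */
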
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID			(1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE	(1<<1)
#define GEN8_CTX_FORCE_RESTORE		(1<<2)
#define GEN8_CTX_L3LLC_COHERENT		(1<<5)
#define GEN8_CTX_PRIVILEGE		(1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)
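
/*
 * Hypothetical usage sketch of the macro above (illustrative only; the names
 * follow how the context image is populated elsewhere in this file):
 *
 *	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base), 0);
 *
 * i.e. the MMIO offset of the register lands at reg_state[pos] and its
 * initial value at reg_state[pos + 1].
 */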

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms with execlists available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	if (IS_GEN8(dev_priv))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *					  for a pinned context
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *	bits  0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
 *	bits 12-31: LRCA, GTT address of (the HWSP of) this context
 *	bits 32-52: ctx ID, a globally unique tag
 *	bits 53-54: mbz, reserved for use by hardware
 *	bits 55-63: group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = ctx->desc_template;			/* bits  3-4  */
	desc |= engine->ctx_desc_template;		/* bits  0-11 */
	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
							/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */

	ce->lrc_desc = desc;
}
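
/*
 * Worked example with assumed values (purely illustrative): if the address
 * term ORed in above evaluates to 0x10000, the context's hw_id is 5 and the
 * templates contribute GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE, the cached
 * descriptor is
 *
 *	(5ULL << GEN8_CTX_ID_SHIFT) | 0x10000 | GEN8_CTX_PRIVILEGE | GEN8_CTX_VALID
 *	== 0x0000000500010101
 *
 * i.e. flags in bits 0-11, the LRCA in bits 12-31 and the ctx ID in bits 32-52.
 */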

uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{
	struct intel_engine_cs *engine = rq0->engine;
	struct drm_i915_private *dev_priv = rq0->i915;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

	/* ELSP is a wo register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;
	unsigned int fw_domains = rq0->engine->fw_domains;

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static inline void execlists_context_status_change(
		struct drm_i915_gem_request *rq,
		unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
}

static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active, generate a warning as batches that finish
	 * without the irqs may get lost and a GPU hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->i915));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			i915_gem_request_put(req0);
			req0 = cursor;
		} else {
			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
				/*
				 * The (merged) req0 ctx requires single
				 * submission; stop picking.
				 */
				if (req0->ctx->execlists_force_single_submission)
					break;
				/*
				 * req0's ctx doesn't require single
				 * submission, but the next req's ctx does;
				 * stop picking.
				 */
				if (cursor->ctx->execlists_force_single_submission)
					break;
			}
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);

	if (req1)
		execlists_context_status_change(req1,
						INTEL_CONTEXT_SCHEDULE_IN);

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		req0->tail += 8;
		req0->tail &= req0->ring->size - 1;
	}

	execlists_submit_requests(req0, req1);
}

static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);

	list_del(&head_req->execlist_link);
	i915_gem_request_put(head_req);

	return 1;
}

static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/*
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

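	/* Worked example with assumed values: if GEN8_CSB_ENTRIES is 6,
	 * read_pointer == 5 and write_pointer == 1, the wrap above yields
	 * write_pointer == 7, so the loop below consumes entries 6 and 7,
	 * which get_context_status() reduces modulo 6 to hardware slots
	 * 0 and 1.
	 */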
	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		if (!engine->disable_lite_restore_wa ||
		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_context_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}

static void execlists_context_queue(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct drm_i915_gem_request *cursor;
	int num_elements = 0;

	spin_lock_bh(&engine->execlist_lock);

	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

	if (num_elements > 2) {
		struct drm_i915_gem_request *tail_req;

		tail_req = list_last_entry(&engine->execlist_queue,
					   struct drm_i915_gem_request,
					   execlist_link);

		if (request->ctx == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
				"More than 2 already-submitted reqs queued\n");
			list_del(&tail_req->execlist_link);
			i915_gem_request_put(tail_req);
		}
	}

	i915_gem_request_get(request);
	list_add_tail(&request->execlist_link, &engine->execlist_queue);
	request->ctx_hw_id = request->ctx->hw_id;
	if (num_elements == 0)
		execlists_context_unqueue(engine);

	spin_unlock_bh(&engine->execlist_lock);
}

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
{
	const unsigned other_rings = ~intel_engine_flag(req->engine);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return req->engine->emit_flush(req, EMIT_INVALIDATE);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_context *ce = &request->ctx->engine[engine->id];
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	if (!ce->state) {
		ret = execlists_context_deferred_alloc(request->ctx, engine);
		if (ret)
			return ret;
	}

	request->ring = ce->ring;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		ret = i915_guc_wq_check_space(request);
		if (ret)
			return ret;
	}

	ret = intel_lr_context_pin(request->ctx, engine);
	if (ret)
		return ret;

	ret = intel_ring_begin(request, 0);
	if (ret)
		goto err_unpin;

	if (!ce->initialised) {
		ret = engine->init_context(request);
		if (ret)
			goto err_unpin;

		ce->initialised = true;
	}

	/* Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;

err_unpin:
	intel_lr_context_unpin(request->ctx, engine);
	return ret;
}

/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;
	struct intel_engine_cs *engine = request->engine;

	intel_ring_advance(ring);
	request->tail = ring->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	request->previous_context = engine->last_context;
	engine->last_context = request->ctx;

	if (i915.enable_guc_submission)
		i915_guc_submit(request);
	else
		execlists_context_queue(request);

	return 0;
}

/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @params: execbuffer call parameters.
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
 * away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */
int intel_execlists_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *engine = params->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_ring *ring = params->request->ring;
	u64 exec_start;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	if (engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	exec_start = params->batch_obj_vm_offset +
		     args->batch_start_offset;

	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req, *tmp;
	LIST_HEAD(cancel_list);

	WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));

	spin_lock_bh(&engine->execlist_lock);
	list_replace_init(&engine->execlist_queue, &cancel_list);
	spin_unlock_bh(&engine->execlist_lock);

	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
		list_del(&req->execlist_link);
		i915_gem_request_put(req);
	}
}

void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
	if (intel_wait_for_register(dev_priv,
				    RING_MI_MODE(engine->mmio_base),
				    MODE_IDLE, MODE_IDLE,
				    1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
		return;
	}
	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}

static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct intel_context *ce = &ctx->engine[engine->id];
	void *vaddr;
	u32 *lrc_reg_state;
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_ctx_obj;
	}

	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	ret = intel_ring_pin(ce->ring);
	if (ret)
		goto unpin_map;

	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
	intel_lr_context_descriptor_update(ctx, engine);

	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
	ce->lrc_reg_state = lrc_reg_state;
	ce->state->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission)
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	i915_gem_context_get(ctx);
	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
	i915_gem_object_ggtt_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	intel_ring_unpin(ce->ring);

	i915_gem_object_unpin_map(ce->state);
	i915_gem_object_ggtt_unpin(ce->state);

	ce->lrc_vma = NULL;
	ce->lrc_desc = 0;
	ce->lrc_reg_state = NULL;

	i915_gem_context_put(ctx);
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_ring *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

Arun Siluvery83b8a982015-07-08 10:27:05 +01001025#define wa_ctx_emit(batch, index, cmd) \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001026 do { \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001027 int __index = (index)++; \
1028 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001029 return -ENOSPC; \
1030 } \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001031 batch[__index] = (cmd); \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001032 } while (0)
1033
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001034#define wa_ctx_emit_reg(batch, index, reg) \
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001035 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
Arun Siluvery9e000842015-07-03 14:27:31 +01001036
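/*
 * Illustrative sketch: wa_ctx_emit() is a bounds-checked append of one
 * dword into a single page (PAGE_SIZE / sizeof(uint32_t) == 1024 dwords
 * for 4K pages). The same idea as a plain function, with invented names;
 * the macro instead returns -ENOSPC directly from its caller:
 */
static int example_wa_ctx_emit(uint32_t *batch, uint32_t *index, uint32_t cmd)
{
	if (*index >= 4096 / sizeof(uint32_t))	/* one page of dwords */
		return -1;			/* out of batch space */

	batch[(*index)++] = cmd;
	return 0;
}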
1037/*
	1038 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
	1039 * PIPE_CONTROL instruction. This is required for the flush to happen correctly,
	1040 * but there is a slight complication as this is applied in a WA batch where the
	1041 * values are only initialized once, so we cannot read the register value at the
	1042 * beginning and reuse it later; hence we save its value to memory, upload a
	1043 * constant value with bit 21 set and then restore it with the saved value.
	1044 * To simplify the WA, a constant value is formed by using the default value
	1045 * of this register. This shouldn't be a problem because we are only modifying
	1046 * it for a short period and this batch is non-preemptible. We could of course
	1047 * use additional instructions that read the actual value of the register
	1048 * at that time and set our bit of interest, but that would complicate the WA.
	1049 *
	1050 * This WA is also required for Gen9, so extracting it as a function avoids
	1051 * code duplication.
1052 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001053static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001054 uint32_t *batch,
Arun Siluvery9e000842015-07-03 14:27:31 +01001055 uint32_t index)
1056{
1057 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1058
Arun Siluverya4106a72015-07-14 15:01:29 +01001059 /*
Mika Kuoppalafe905812016-06-07 17:19:03 +03001060 * WaDisableLSQCROPERFforOCL:skl,kbl
Arun Siluverya4106a72015-07-14 15:01:29 +01001061 * This WA is implemented in skl_init_clock_gating() but since
1062 * this batch updates GEN8_L3SQCREG4 with default value we need to
1063 * set this bit here to retain the WA during flush.
1064 */
Mika Kuoppalafe905812016-06-07 17:19:03 +03001065 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
1066 IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
Arun Siluverya4106a72015-07-14 15:01:29 +01001067 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1068
Arun Siluveryf1afe242015-08-04 16:22:20 +01001069 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001070 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001071 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001072 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001073 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001074
Arun Siluvery83b8a982015-07-08 10:27:05 +01001075 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001076 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001077 wa_ctx_emit(batch, index, l3sqc4_flush);
Arun Siluvery9e000842015-07-03 14:27:31 +01001078
Arun Siluvery83b8a982015-07-08 10:27:05 +01001079 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1080 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1081 PIPE_CONTROL_DC_FLUSH_ENABLE));
1082 wa_ctx_emit(batch, index, 0);
1083 wa_ctx_emit(batch, index, 0);
1084 wa_ctx_emit(batch, index, 0);
1085 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001086
Arun Siluveryf1afe242015-08-04 16:22:20 +01001087 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001088 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001089 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001090 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001091 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001092
1093 return index;
1094}
1095
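/*
 * Illustrative sketch: the function above is a save/modify/restore
 * sequence around a serialising PIPE_CONTROL, expressed in command-stream
 * dwords. Reduced to its shape with invented step names (the real
 * encodings come from i915_reg.h):
 */
enum example_step { EX_SAVE, EX_MODIFY, EX_BARRIER, EX_RESTORE };

static unsigned int example_l3sqc_wa_shape(enum example_step *seq)
{
	unsigned int n = 0;

	seq[n++] = EX_SAVE;	/* SRM: L3SQCREG4 -> scratch + 256 */
	seq[n++] = EX_MODIFY;	/* LRI: L3SQCREG4 <- default | bit 21 */
	seq[n++] = EX_BARRIER;	/* PIPE_CONTROL: CS stall + DC flush */
	seq[n++] = EX_RESTORE;	/* LRM: scratch + 256 -> L3SQCREG4 */

	return n;
}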
Arun Siluvery17ee9502015-06-19 19:07:01 +01001096static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1097 uint32_t offset,
1098 uint32_t start_alignment)
1099{
1100 return wa_ctx->offset = ALIGN(offset, start_alignment);
1101}
1102
1103static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1104 uint32_t offset,
1105 uint32_t size_alignment)
1106{
1107 wa_ctx->size = offset - wa_ctx->offset;
1108
1109 WARN(wa_ctx->size % size_alignment,
1110 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1111 wa_ctx->size, size_alignment);
1112 return 0;
1113}
1114
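/*
 * Illustrative sketch: wa_ctx_start()/wa_ctx_end() bracket one sub-batch,
 * recording a start offset rounded up to the alignment and a size that
 * must be a multiple of it. The rounding below is what the kernel's
 * ALIGN() macro does for power-of-two alignments:
 */
static uint32_t example_align_up(uint32_t offset, uint32_t alignment)
{
	/* Assumes alignment is a power of two, e.g. CACHELINE_DWORDS. */
	return (offset + alignment - 1) & ~(alignment - 1);
}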
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001115/*
	1116 * Typically we only have one indirect_ctx and per_ctx batch buffer, which are
	1117 * initialized at the beginning and shared across all contexts, but this field
	1118 * helps us to have multiple batches at different offsets and select them based
	1119 * on some criteria. At the moment this batch always starts at the beginning of
	1120 * the page and at this point we don't have multiple wa_ctx batch buffers.
Arun Siluvery17ee9502015-06-19 19:07:01 +01001121 *
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001122 * The number of WAs applied is not known at the beginning; we use this field
	1123 * to return the number of DWORDs written.
Arun Siluvery17ee9502015-06-19 19:07:01 +01001124 *
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001125 * Note that this batch does not contain MI_BATCH_BUFFER_END of its own,
	1126 * so it adds NOOPs as padding to make it cacheline aligned.
	1127 * MI_BATCH_BUFFER_END will be added to the per-ctx batch, and both of them
	1128 * together make a complete batch buffer.
Arun Siluvery17ee9502015-06-19 19:07:01 +01001129 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001130static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001131 struct i915_wa_ctx_bb *wa_ctx,
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001132 uint32_t *batch,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001133 uint32_t *offset)
1134{
Arun Siluvery0160f052015-06-23 15:46:57 +01001135 uint32_t scratch_addr;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001136 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1137
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001138 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001139 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001140
Arun Siluveryc82435b2015-06-19 18:37:13 +01001141 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
Chris Wilsonc0336662016-05-06 15:40:21 +01001142 if (IS_BROADWELL(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001143 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Andrzej Hajda604ef732015-09-21 15:33:35 +02001144 if (rc < 0)
1145 return rc;
1146 index = rc;
Arun Siluveryc82435b2015-06-19 18:37:13 +01001147 }
1148
Arun Siluvery0160f052015-06-23 15:46:57 +01001149 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	1150	/* The actual scratch location is at a 128-byte offset */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001151 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
Arun Siluvery0160f052015-06-23 15:46:57 +01001152
Arun Siluvery83b8a982015-07-08 10:27:05 +01001153 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1154 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1155 PIPE_CONTROL_GLOBAL_GTT_IVB |
1156 PIPE_CONTROL_CS_STALL |
1157 PIPE_CONTROL_QW_WRITE));
1158 wa_ctx_emit(batch, index, scratch_addr);
1159 wa_ctx_emit(batch, index, 0);
1160 wa_ctx_emit(batch, index, 0);
1161 wa_ctx_emit(batch, index, 0);
Arun Siluvery0160f052015-06-23 15:46:57 +01001162
Arun Siluvery17ee9502015-06-19 19:07:01 +01001163 /* Pad to end of cacheline */
1164 while (index % CACHELINE_DWORDS)
Arun Siluvery83b8a982015-07-08 10:27:05 +01001165 wa_ctx_emit(batch, index, MI_NOOP);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001166
1167 /*
1168 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1169 * execution depends on the length specified in terms of cache lines
1170 * in the register CTX_RCS_INDIRECT_CTX
1171 */
1172
1173 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1174}
1175
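/*
 * Illustrative sketch: the padding loop above tops the batch up to a full
 * cacheline with MI_NOOPs (CACHELINE_DWORDS == 64 bytes / 4 == 16 dwords),
 * since the indirect ctx length is programmed in whole cache lines.
 * As a standalone helper, with invented names:
 */
static uint32_t example_pad_to_cacheline(uint32_t *batch, uint32_t index)
{
	while (index % 16)		/* CACHELINE_DWORDS */
		batch[index++] = 0;	/* MI_NOOP encodes as 0 */

	return index;
}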
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001176/*
	1177 * This batch is started immediately after the indirect_ctx batch. Since we
	1178 * ensure that indirect_ctx ends on a cacheline, this batch is aligned automatically.
Arun Siluvery17ee9502015-06-19 19:07:01 +01001179 *
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001180 * The number of DWORDs written is returned using this field.
Arun Siluvery17ee9502015-06-19 19:07:01 +01001181 *
	1182 * This batch is terminated with MI_BATCH_BUFFER_END, so we need not add padding
	1183 * to align it to a cacheline, as padding after MI_BATCH_BUFFER_END is redundant.
1184 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001185static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001186 struct i915_wa_ctx_bb *wa_ctx,
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001187 uint32_t *batch,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001188 uint32_t *offset)
1189{
1190 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1191
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001192 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001193 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001194
Arun Siluvery83b8a982015-07-08 10:27:05 +01001195 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001196
1197 return wa_ctx_end(wa_ctx, *offset = index, 1);
1198}
1199
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001200static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001201 struct i915_wa_ctx_bb *wa_ctx,
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001202 uint32_t *batch,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001203 uint32_t *offset)
1204{
Arun Siluverya4106a72015-07-14 15:01:29 +01001205 int ret;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001206 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1207
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001208 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001209 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1210 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001211 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery0504cff2015-07-14 15:01:27 +01001212
Arun Siluverya4106a72015-07-14 15:01:29 +01001213 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001214 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Arun Siluverya4106a72015-07-14 15:01:29 +01001215 if (ret < 0)
1216 return ret;
1217 index = ret;
1218
Mika Kuoppala873e8172016-07-20 14:26:13 +03001219 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
1220 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1221 wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
1222 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
1223 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
1224 wa_ctx_emit(batch, index, MI_NOOP);
1225
Mika Kuoppala066d4622016-06-07 17:19:15 +03001226 /* WaClearSlmSpaceAtContextSwitch:kbl */
	1227	/* The actual scratch location is at a 128-byte offset */
1228 if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
1229 uint32_t scratch_addr
1230 = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1231
1232 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1233 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1234 PIPE_CONTROL_GLOBAL_GTT_IVB |
1235 PIPE_CONTROL_CS_STALL |
1236 PIPE_CONTROL_QW_WRITE));
1237 wa_ctx_emit(batch, index, scratch_addr);
1238 wa_ctx_emit(batch, index, 0);
1239 wa_ctx_emit(batch, index, 0);
1240 wa_ctx_emit(batch, index, 0);
1241 }
Tim Gore3485d992016-07-05 10:01:30 +01001242
1243 /* WaMediaPoolStateCmdInWABB:bxt */
1244 if (HAS_POOLED_EU(engine->i915)) {
1245 /*
	1246	 * EU pool configuration is set up along with the golden context
	1247	 * during context initialization. This value depends on the
	1248	 * device type (2x6 or 3x6) and needs to be updated based
	1249	 * on which subslice is disabled, especially for 2x6
	1250	 * devices. However, it is safe to load the default
	1251	 * configuration of a 3x6 device instead of masking off the
	1252	 * corresponding bits, because the HW ignores bits of a disabled
	1253	 * subslice and drops down to the appropriate config. Please
	1254	 * see render_state_setup() in i915_gem_render_state.c for the
	1255	 * possible configurations; to avoid duplication they are
	1256	 * not shown here again.
1257 */
1258 u32 eu_pool_config = 0x00777000;
1259 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1260 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1261 wa_ctx_emit(batch, index, eu_pool_config);
1262 wa_ctx_emit(batch, index, 0);
1263 wa_ctx_emit(batch, index, 0);
1264 wa_ctx_emit(batch, index, 0);
1265 }
1266
Arun Siluvery0504cff2015-07-14 15:01:27 +01001267 /* Pad to end of cacheline */
1268 while (index % CACHELINE_DWORDS)
1269 wa_ctx_emit(batch, index, MI_NOOP);
1270
1271 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1272}
1273
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001274static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001275 struct i915_wa_ctx_bb *wa_ctx,
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001276 uint32_t *batch,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001277 uint32_t *offset)
1278{
1279 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1280
Arun Siluvery9b014352015-07-14 15:01:30 +01001281 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001282 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1283 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
Arun Siluvery9b014352015-07-14 15:01:30 +01001284 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001285 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
Arun Siluvery9b014352015-07-14 15:01:30 +01001286 wa_ctx_emit(batch, index,
1287 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1288 wa_ctx_emit(batch, index, MI_NOOP);
1289 }
1290
Tim Goreb1e429f2016-03-21 14:37:29 +00001291 /* WaClearTdlStateAckDirtyBits:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001292 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
Tim Goreb1e429f2016-03-21 14:37:29 +00001293 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1294
1295 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1296 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1297
1298 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1299 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1300
1301 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1302 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1303
1304 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1305 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1306 wa_ctx_emit(batch, index, 0x0);
1307 wa_ctx_emit(batch, index, MI_NOOP);
1308 }
1309
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001310 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001311 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1312 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001313 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1314
Arun Siluvery0504cff2015-07-14 15:01:27 +01001315 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1316
1317 return wa_ctx_end(wa_ctx, *offset = index, 1);
1318}
1319
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001320static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001321{
1322 int ret;
1323
Chris Wilson91c8a322016-07-05 10:40:23 +01001324 engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
1325 PAGE_ALIGN(size));
Chris Wilsonfe3db792016-04-25 13:32:13 +01001326 if (IS_ERR(engine->wa_ctx.obj)) {
Arun Siluvery17ee9502015-06-19 19:07:01 +01001327 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
Chris Wilsonfe3db792016-04-25 13:32:13 +01001328 ret = PTR_ERR(engine->wa_ctx.obj);
1329 engine->wa_ctx.obj = NULL;
1330 return ret;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001331 }
1332
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001333 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001334 if (ret) {
1335 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1336 ret);
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001337 i915_gem_object_put(engine->wa_ctx.obj);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001338 return ret;
1339 }
1340
1341 return 0;
1342}
1343
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001344static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001345{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001346 if (engine->wa_ctx.obj) {
1347 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001348 i915_gem_object_put(engine->wa_ctx.obj);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001349 engine->wa_ctx.obj = NULL;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001350 }
1351}
1352
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001353static int intel_init_workaround_bb(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001354{
1355 int ret;
1356 uint32_t *batch;
1357 uint32_t offset;
1358 struct page *page;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001359 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001360
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001361 WARN_ON(engine->id != RCS);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001362
Arun Siluvery5e60d792015-06-23 15:50:44 +01001363	/* update this when WAs for higher Gens are added */
Chris Wilsonc0336662016-05-06 15:40:21 +01001364 if (INTEL_GEN(engine->i915) > 9) {
Arun Siluvery0504cff2015-07-14 15:01:27 +01001365 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
Chris Wilsonc0336662016-05-06 15:40:21 +01001366 INTEL_GEN(engine->i915));
Arun Siluvery5e60d792015-06-23 15:50:44 +01001367 return 0;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001368 }
Arun Siluvery5e60d792015-06-23 15:50:44 +01001369
Arun Siluveryc4db7592015-06-19 18:37:11 +01001370 /* some WA perform writes to scratch page, ensure it is valid */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001371 if (engine->scratch.obj == NULL) {
1372 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
Arun Siluveryc4db7592015-06-19 18:37:11 +01001373 return -EINVAL;
1374 }
1375
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001376 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001377 if (ret) {
1378 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1379 return ret;
1380 }
1381
Dave Gordon033908a2015-12-10 18:51:23 +00001382 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001383 batch = kmap_atomic(page);
1384 offset = 0;
1385
Chris Wilsonc0336662016-05-06 15:40:21 +01001386 if (IS_GEN8(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001387 ret = gen8_init_indirectctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001388 &wa_ctx->indirect_ctx,
1389 batch,
1390 &offset);
1391 if (ret)
1392 goto out;
1393
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001394 ret = gen8_init_perctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001395 &wa_ctx->per_ctx,
1396 batch,
1397 &offset);
1398 if (ret)
1399 goto out;
Chris Wilsonc0336662016-05-06 15:40:21 +01001400 } else if (IS_GEN9(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001401 ret = gen9_init_indirectctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001402 &wa_ctx->indirect_ctx,
1403 batch,
1404 &offset);
1405 if (ret)
1406 goto out;
1407
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001408 ret = gen9_init_perctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001409 &wa_ctx->per_ctx,
1410 batch,
1411 &offset);
1412 if (ret)
1413 goto out;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001414 }
1415
1416out:
1417 kunmap_atomic(batch);
1418 if (ret)
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001419 lrc_destroy_wa_ctx_obj(engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001420
1421 return ret;
1422}
1423
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001424static void lrc_init_hws(struct intel_engine_cs *engine)
1425{
Chris Wilsonc0336662016-05-06 15:40:21 +01001426 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001427
1428 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1429 (u32)engine->status_page.gfx_addr);
1430 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1431}
1432
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001433static int gen8_init_common_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001434{
Chris Wilsonc0336662016-05-06 15:40:21 +01001435 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulinc6a2ac72016-02-26 16:58:32 +00001436 unsigned int next_context_status_buffer_hw;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001437
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001438 lrc_init_hws(engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01001439
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001440 I915_WRITE_IMR(engine,
1441 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1442 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001443
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001444 I915_WRITE(RING_MODE_GEN7(engine),
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001445 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1446 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001447 POSTING_READ(RING_MODE_GEN7(engine));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001448
1449 /*
1450 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1451 * zero, we need to read the write pointer from hardware and use its
1452 * value because "this register is power context save restored".
1453 * Effectively, these states have been observed:
1454 *
1455 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1456 * BDW | CSB regs not reset | CSB regs reset |
1457 * CHT | CSB regs not reset | CSB regs not reset |
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001458 * SKL | ? | ? |
1459 * BXT | ? | ? |
Michel Thierrydfc53c52015-09-28 13:25:12 +01001460 */
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001461 next_context_status_buffer_hw =
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001462 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001463
1464 /*
1465 * When the CSB registers are reset (also after power-up / gpu reset),
1466 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1467 * this special case, so the first element read is CSB[0].
1468 */
1469 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1470 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1471
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001472 engine->next_context_status_buffer = next_context_status_buffer_hw;
1473 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001474
Tomas Elffc0768c2016-03-21 16:26:59 +00001475 intel_engine_init_hangcheck(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001476
Peter Antoine0ccdacf2016-04-13 15:03:25 +01001477 return intel_mocs_init_engine(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001478}
1479
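/*
 * Illustrative sketch: the read-pointer selection above. After a CSB
 * register reset the hardware write pointer reads back as all 1's
 * (GEN8_CSB_PTR_MASK), which must be remapped so that the first entry
 * consumed is CSB[0]. With the GEN8 values assumed here (6 entries,
 * 3-bit pointer field), as a standalone function:
 */
static unsigned int example_csb_next(unsigned int hw_write_ptr)
{
	const unsigned int entries = 6;		/* GEN8_CSB_ENTRIES */
	const unsigned int ptr_mask = 0x7;	/* GEN8_CSB_PTR_MASK */

	if (hw_write_ptr == ptr_mask)	/* registers were reset */
		return entries - 1;	/* next increment wraps to CSB[0] */

	return hw_write_ptr;
}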
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001480static int gen8_init_render_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001481{
Chris Wilsonc0336662016-05-06 15:40:21 +01001482 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001483 int ret;
1484
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001485 ret = gen8_init_common_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001486 if (ret)
1487 return ret;
1488
1489 /* We need to disable the AsyncFlip performance optimisations in order
1490 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1491 * programmed to '1' on all products.
1492 *
1493 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1494 */
1495 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1496
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001497 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1498
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001499 return init_workarounds_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001500}
1501
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001502static int gen9_init_render_ring(struct intel_engine_cs *engine)
Damien Lespiau82ef8222015-02-09 19:33:08 +00001503{
1504 int ret;
1505
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001506 ret = gen8_init_common_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001507 if (ret)
1508 return ret;
1509
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001510 return init_workarounds_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001511}
1512
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001513static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1514{
1515 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
Chris Wilson7e37f882016-08-02 22:50:21 +01001516 struct intel_ring *ring = req->ring;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001517 struct intel_engine_cs *engine = req->engine;
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001518 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1519 int i, ret;
1520
Chris Wilson987046a2016-04-28 09:56:46 +01001521 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001522 if (ret)
1523 return ret;
1524
Chris Wilsonb5321f32016-08-02 22:50:18 +01001525 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001526 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1527 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1528
Chris Wilsonb5321f32016-08-02 22:50:18 +01001529 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
1530 intel_ring_emit(ring, upper_32_bits(pd_daddr));
1531 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
1532 intel_ring_emit(ring, lower_32_bits(pd_daddr));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001533 }
1534
Chris Wilsonb5321f32016-08-02 22:50:18 +01001535 intel_ring_emit(ring, MI_NOOP);
1536 intel_ring_advance(ring);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001537
1538 return 0;
1539}
1540
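/*
 * Illustrative sketch: each PDP entry is a 64-bit DMA address loaded as
 * two 32-bit LRI payloads, upper dword first, exactly as the
 * upper_32_bits()/lower_32_bits() pair splits it above:
 */
static void example_split_pd_addr(uint64_t addr, uint32_t *udw, uint32_t *ldw)
{
	*udw = (uint32_t)(addr >> 32);	/* upper_32_bits(addr) */
	*ldw = (uint32_t)addr;		/* lower_32_bits(addr) */
}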
John Harrisonbe795fc2015-05-29 17:44:03 +01001541static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
John Harrison8e004ef2015-02-13 11:48:10 +00001542 u64 offset, unsigned dispatch_flags)
Oscar Mateo15648582014-07-24 17:04:32 +01001543{
Chris Wilson7e37f882016-08-02 22:50:21 +01001544 struct intel_ring *ring = req->ring;
John Harrison8e004ef2015-02-13 11:48:10 +00001545 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
Oscar Mateo15648582014-07-24 17:04:32 +01001546 int ret;
1547
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001548	/* Don't rely on the hw updating the PDPs, especially in lite-restore.
	1549	 * Ideally, we should set Force PD Restore in the ctx descriptor,
	1550	 * but we can't. Force Restore would be a second option, but
	1551	 * it is unsafe in case of lite-restore (because the ctx is
Michel Thierry2dba3232015-07-30 11:06:23 +01001552	 * not idle). PML4 is allocated during ppgtt init, so this is
	1553	 * not needed in 48-bit. */
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001554 if (req->ctx->ppgtt &&
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001555 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001556 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
Chris Wilsonc0336662016-05-06 15:40:21 +01001557 !intel_vgpu_active(req->i915)) {
Michel Thierry2dba3232015-07-30 11:06:23 +01001558 ret = intel_logical_ring_emit_pdps(req);
1559 if (ret)
1560 return ret;
1561 }
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001562
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001563 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001564 }
1565
Chris Wilson987046a2016-04-28 09:56:46 +01001566 ret = intel_ring_begin(req, 4);
Oscar Mateo15648582014-07-24 17:04:32 +01001567 if (ret)
1568 return ret;
1569
1570 /* FIXME(BDW): Address space and security selectors. */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001571 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
1572 (ppgtt<<8) |
1573 (dispatch_flags & I915_DISPATCH_RS ?
1574 MI_BATCH_RESOURCE_STREAMER : 0));
1575 intel_ring_emit(ring, lower_32_bits(offset));
1576 intel_ring_emit(ring, upper_32_bits(offset));
1577 intel_ring_emit(ring, MI_NOOP);
1578 intel_ring_advance(ring);
Oscar Mateo15648582014-07-24 17:04:32 +01001579
1580 return 0;
1581}
1582
Chris Wilson31bb59c2016-07-01 17:23:27 +01001583static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001584{
Chris Wilsonc0336662016-05-06 15:40:21 +01001585 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson31bb59c2016-07-01 17:23:27 +01001586 I915_WRITE_IMR(engine,
1587 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1588 POSTING_READ_FW(RING_IMR(engine->mmio_base));
Oscar Mateo73d477f2014-07-24 17:04:31 +01001589}
1590
Chris Wilson31bb59c2016-07-01 17:23:27 +01001591static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001592{
Chris Wilsonc0336662016-05-06 15:40:21 +01001593 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson31bb59c2016-07-01 17:23:27 +01001594 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001595}
1596
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01001597static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
Oscar Mateo47122742014-07-24 17:04:28 +01001598{
Chris Wilson7e37f882016-08-02 22:50:21 +01001599 struct intel_ring *ring = request->ring;
1600 u32 cmd;
Oscar Mateo47122742014-07-24 17:04:28 +01001601 int ret;
1602
Chris Wilson987046a2016-04-28 09:56:46 +01001603 ret = intel_ring_begin(request, 4);
Oscar Mateo47122742014-07-24 17:04:28 +01001604 if (ret)
1605 return ret;
1606
1607 cmd = MI_FLUSH_DW + 1;
1608
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001609 /* We always require a command barrier so that subsequent
1610 * commands, such as breadcrumb interrupts, are strictly ordered
1611 * wrt the contents of the write cache being flushed to memory
1612 * (and thus being coherent from the CPU).
1613 */
1614 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1615
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01001616 if (mode & EMIT_INVALIDATE) {
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001617 cmd |= MI_INVALIDATE_TLB;
Chris Wilson1dae2df2016-08-02 22:50:19 +01001618 if (request->engine->id == VCS)
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001619 cmd |= MI_INVALIDATE_BSD;
Oscar Mateo47122742014-07-24 17:04:28 +01001620 }
1621
Chris Wilsonb5321f32016-08-02 22:50:18 +01001622 intel_ring_emit(ring, cmd);
1623 intel_ring_emit(ring,
1624 I915_GEM_HWS_SCRATCH_ADDR |
1625 MI_FLUSH_DW_USE_GTT);
1626 intel_ring_emit(ring, 0); /* upper addr */
1627 intel_ring_emit(ring, 0); /* value */
1628 intel_ring_advance(ring);
Oscar Mateo47122742014-07-24 17:04:28 +01001629
1630 return 0;
1631}
1632
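/*
 * Illustrative sketch: the command dword assembled above. The bit
 * positions below are stand-ins invented for this sketch; only the
 * structure (length +1 for the qword address, unconditional post-sync
 * store, conditional TLB/BSD invalidation on the video engine) mirrors
 * the code:
 */
static uint32_t example_flush_dw_cmd(uint32_t mi_flush_dw, int invalidate,
				     int is_vcs)
{
	uint32_t cmd = mi_flush_dw + 1;	/* one extra dword: qword address */

	cmd |= 1u << 21;		/* stand-in: store-index post-sync write */
	if (invalidate) {
		cmd |= 1u << 18;	/* stand-in: MI_INVALIDATE_TLB */
		if (is_vcs)
			cmd |= 1u << 7;	/* stand-in: MI_INVALIDATE_BSD */
	}

	return cmd;
}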
John Harrison7deb4d32015-05-29 17:43:59 +01001633static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01001634 u32 mode)
Oscar Mateo47122742014-07-24 17:04:28 +01001635{
Chris Wilson7e37f882016-08-02 22:50:21 +01001636 struct intel_ring *ring = request->ring;
Chris Wilsonb5321f32016-08-02 22:50:18 +01001637 struct intel_engine_cs *engine = request->engine;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001638 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001639 bool vf_flush_wa = false, dc_flush_wa = false;
Oscar Mateo47122742014-07-24 17:04:28 +01001640 u32 flags = 0;
1641 int ret;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001642 int len;
Oscar Mateo47122742014-07-24 17:04:28 +01001643
1644 flags |= PIPE_CONTROL_CS_STALL;
1645
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01001646 if (mode & EMIT_FLUSH) {
Oscar Mateo47122742014-07-24 17:04:28 +01001647 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1648 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
Francisco Jerez965fd602016-01-13 18:59:39 -08001649 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
Chris Wilson40a24482015-08-21 16:08:41 +01001650 flags |= PIPE_CONTROL_FLUSH_ENABLE;
Oscar Mateo47122742014-07-24 17:04:28 +01001651 }
1652
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01001653 if (mode & EMIT_INVALIDATE) {
Oscar Mateo47122742014-07-24 17:04:28 +01001654 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1655 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1656 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1657 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1658 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1659 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1660 flags |= PIPE_CONTROL_QW_WRITE;
1661 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
Oscar Mateo47122742014-07-24 17:04:28 +01001662
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001663 /*
1664 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1665 * pipe control.
1666 */
Chris Wilsonc0336662016-05-06 15:40:21 +01001667 if (IS_GEN9(request->i915))
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001668 vf_flush_wa = true;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001669
1670 /* WaForGAMHang:kbl */
1671 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1672 dc_flush_wa = true;
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001673 }
Imre Deak9647ff32015-01-25 13:27:11 -08001674
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001675 len = 6;
1676
1677 if (vf_flush_wa)
1678 len += 6;
1679
1680 if (dc_flush_wa)
1681 len += 12;
1682
1683 ret = intel_ring_begin(request, len);
Oscar Mateo47122742014-07-24 17:04:28 +01001684 if (ret)
1685 return ret;
1686
Imre Deak9647ff32015-01-25 13:27:11 -08001687 if (vf_flush_wa) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01001688 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1689 intel_ring_emit(ring, 0);
1690 intel_ring_emit(ring, 0);
1691 intel_ring_emit(ring, 0);
1692 intel_ring_emit(ring, 0);
1693 intel_ring_emit(ring, 0);
Imre Deak9647ff32015-01-25 13:27:11 -08001694 }
1695
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001696 if (dc_flush_wa) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01001697 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1698 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
1699 intel_ring_emit(ring, 0);
1700 intel_ring_emit(ring, 0);
1701 intel_ring_emit(ring, 0);
1702 intel_ring_emit(ring, 0);
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001703 }
1704
Chris Wilsonb5321f32016-08-02 22:50:18 +01001705 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1706 intel_ring_emit(ring, flags);
1707 intel_ring_emit(ring, scratch_addr);
1708 intel_ring_emit(ring, 0);
1709 intel_ring_emit(ring, 0);
1710 intel_ring_emit(ring, 0);
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001711
1712 if (dc_flush_wa) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01001713 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1714 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
1715 intel_ring_emit(ring, 0);
1716 intel_ring_emit(ring, 0);
1717 intel_ring_emit(ring, 0);
1718 intel_ring_emit(ring, 0);
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001719 }
1720
Chris Wilsonb5321f32016-08-02 22:50:18 +01001721 intel_ring_advance(ring);
Oscar Mateo47122742014-07-24 17:04:28 +01001722
1723 return 0;
1724}
1725
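/*
 * Illustrative sketch: the ring space reserved above grows with the
 * active workarounds; the main PIPE_CONTROL is 6 dwords, the Gen9 VF WA
 * prepends one more NULL PIPE_CONTROL, and the kbl DC-flush WA brackets
 * the main flush with two extra ones (6 + 6 dwords):
 */
static int example_flush_render_len(int vf_flush_wa, int dc_flush_wa)
{
	int len = 6;		/* the main PIPE_CONTROL */

	if (vf_flush_wa)
		len += 6;	/* NULL PIPE_CONTROL before VF invalidation */
	if (dc_flush_wa)
		len += 12;	/* DC-flush before + CS-stall after */

	return len;
}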
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001726static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
Imre Deak319404d2015-08-14 18:35:27 +03001727{
Imre Deak319404d2015-08-14 18:35:27 +03001728 /*
1729 * On BXT A steppings there is a HW coherency issue whereby the
1730 * MI_STORE_DATA_IMM storing the completed request's seqno
1731 * occasionally doesn't invalidate the CPU cache. Work around this by
1732 * clflushing the corresponding cacheline whenever the caller wants
1733 * the coherency to be guaranteed. Note that this cacheline is known
1734 * to be clean at this point, since we only write it in
1735 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1736 * this clflush in practice becomes an invalidate operation.
1737 */
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001738 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
Imre Deak319404d2015-08-14 18:35:27 +03001739}
1740
Chris Wilson7c17d372016-01-20 15:43:35 +02001741/*
1742 * Reserve space for 2 NOOPs at the end of each request to be
1743 * used as a workaround for not being allowed to do lite
1744 * restore with HEAD==TAIL (WaIdleLiteRestore).
1745 */
1746#define WA_TAIL_DWORDS 2
1747
John Harrisonc4e76632015-05-29 17:44:01 +01001748static int gen8_emit_request(struct drm_i915_gem_request *request)
Oscar Mateo4da46e12014-07-24 17:04:27 +01001749{
Chris Wilson7e37f882016-08-02 22:50:21 +01001750 struct intel_ring *ring = request->ring;
Oscar Mateo4da46e12014-07-24 17:04:27 +01001751 int ret;
1752
Chris Wilson987046a2016-04-28 09:56:46 +01001753 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001754 if (ret)
1755 return ret;
1756
Chris Wilson7c17d372016-01-20 15:43:35 +02001757 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1758 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
Oscar Mateo4da46e12014-07-24 17:04:27 +01001759
Chris Wilsonb5321f32016-08-02 22:50:18 +01001760 intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1761 intel_ring_emit(ring,
1762 intel_hws_seqno_address(request->engine) |
1763 MI_FLUSH_DW_USE_GTT);
1764 intel_ring_emit(ring, 0);
1765 intel_ring_emit(ring, request->fence.seqno);
1766 intel_ring_emit(ring, MI_USER_INTERRUPT);
1767 intel_ring_emit(ring, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001768 return intel_logical_ring_advance_and_submit(request);
1769}
Oscar Mateo4da46e12014-07-24 17:04:27 +01001770
Chris Wilson7c17d372016-01-20 15:43:35 +02001771static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1772{
Chris Wilson7e37f882016-08-02 22:50:21 +01001773 struct intel_ring *ring = request->ring;
Chris Wilson7c17d372016-01-20 15:43:35 +02001774 int ret;
1775
Chris Wilson987046a2016-04-28 09:56:46 +01001776 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
Chris Wilson7c17d372016-01-20 15:43:35 +02001777 if (ret)
1778 return ret;
1779
Michał Winiarskice81a652016-04-12 15:51:55 +02001780 /* We're using qword write, seqno should be aligned to 8 bytes. */
1781 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1782
Chris Wilson7c17d372016-01-20 15:43:35 +02001783	/* w/a: for post-sync ops following a GPGPU operation we
1784 * need a prior CS_STALL, which is emitted by the flush
1785 * following the batch.
Michel Thierry53292cd2015-04-15 18:11:33 +01001786 */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001787 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1788 intel_ring_emit(ring,
1789 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1790 PIPE_CONTROL_CS_STALL |
1791 PIPE_CONTROL_QW_WRITE));
1792 intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
1793 intel_ring_emit(ring, 0);
1794 intel_ring_emit(ring, i915_gem_request_get_seqno(request));
Michał Winiarskice81a652016-04-12 15:51:55 +02001795 /* We're thrashing one dword of HWS. */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001796 intel_ring_emit(ring, 0);
1797 intel_ring_emit(ring, MI_USER_INTERRUPT);
1798 intel_ring_emit(ring, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001799 return intel_logical_ring_advance_and_submit(request);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001800}
1801
John Harrisonbe013632015-05-29 17:43:45 +01001802static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
Damien Lespiaucef437a2015-02-10 19:32:19 +00001803{
Damien Lespiaucef437a2015-02-10 19:32:19 +00001804 struct render_state so;
Damien Lespiaucef437a2015-02-10 19:32:19 +00001805 int ret;
1806
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001807 ret = i915_gem_render_state_prepare(req->engine, &so);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001808 if (ret)
1809 return ret;
1810
1811 if (so.rodata == NULL)
1812 return 0;
1813
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001814 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
John Harrisonbe013632015-05-29 17:43:45 +01001815 I915_DISPATCH_SECURE);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001816 if (ret)
1817 goto out;
1818
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001819 ret = req->engine->emit_bb_start(req,
Arun Siluvery84e81022015-07-20 10:46:10 +01001820 (so.ggtt_offset + so.aux_batch_offset),
1821 I915_DISPATCH_SECURE);
1822 if (ret)
1823 goto out;
1824
John Harrisonb2af0372015-05-29 17:43:50 +01001825 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001826
Damien Lespiaucef437a2015-02-10 19:32:19 +00001827out:
1828 i915_gem_render_state_fini(&so);
1829 return ret;
1830}
1831
John Harrison87531812015-05-29 17:43:44 +01001832static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
Thomas Daniele7778be2014-12-02 12:50:48 +00001833{
1834 int ret;
1835
John Harrisone2be4fa2015-05-29 17:43:54 +01001836 ret = intel_logical_ring_workarounds_emit(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001837 if (ret)
1838 return ret;
1839
Peter Antoine3bbaba02015-07-10 20:13:11 +03001840 ret = intel_rcs_context_init_mocs(req);
1841 /*
	1842	 * Failing to program the MOCS is non-fatal. The system will not
	1843	 * run at peak performance, so generate an error and carry on.
1844 */
1845 if (ret)
1846 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1847
John Harrisonbe013632015-05-29 17:43:45 +01001848 return intel_lr_context_render_state_init(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001849}
1850
Oscar Mateo73e4d072014-07-24 17:04:48 +01001851/**
1852 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001853 * @engine: Engine Command Streamer.
Oscar Mateo73e4d072014-07-24 17:04:48 +01001854 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001855void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
Oscar Mateo454afeb2014-07-24 17:04:22 +01001856{
John Harrison6402c332014-10-31 12:00:26 +00001857 struct drm_i915_private *dev_priv;
Oscar Mateo9832b9d2014-07-24 17:04:30 +01001858
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00001859 if (!intel_engine_initialized(engine))
Oscar Mateo48d82382014-07-24 17:04:23 +01001860 return;
1861
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +01001862 /*
	1863	 * The tasklet cannot be active at this point due to intel_mark_active/idle,
	1864	 * so this is just for documentation.
1865 */
1866 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1867 tasklet_kill(&engine->irq_tasklet);
1868
Chris Wilsonc0336662016-05-06 15:40:21 +01001869 dev_priv = engine->i915;
John Harrison6402c332014-10-31 12:00:26 +00001870
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001871 if (engine->buffer) {
1872 intel_logical_ring_stop(engine);
1873 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
Dave Gordonb0366a52015-12-08 15:02:36 +00001874 }
Oscar Mateo48d82382014-07-24 17:04:23 +01001875
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001876 if (engine->cleanup)
1877 engine->cleanup(engine);
Oscar Mateo48d82382014-07-24 17:04:23 +01001878
Chris Wilson33a051a2016-07-27 09:07:26 +01001879 intel_engine_cleanup_cmd_parser(engine);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001880 i915_gem_batch_pool_fini(&engine->batch_pool);
Oscar Mateo48d82382014-07-24 17:04:23 +01001881
Chris Wilson688e6c72016-07-01 17:23:15 +01001882 intel_engine_fini_breadcrumbs(engine);
1883
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001884 if (engine->status_page.obj) {
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001885 i915_gem_object_unpin_map(engine->status_page.obj);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001886 engine->status_page.obj = NULL;
Oscar Mateo48d82382014-07-24 17:04:23 +01001887 }
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001888 intel_lr_context_unpin(dev_priv->kernel_context, engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001889
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001890 engine->idle_lite_restore_wa = 0;
1891 engine->disable_lite_restore_wa = false;
1892 engine->ctx_desc_template = 0;
Tvrtko Ursulinca825802016-01-15 15:10:27 +00001893
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001894 lrc_destroy_wa_ctx_obj(engine);
Chris Wilsonc0336662016-05-06 15:40:21 +01001895 engine->i915 = NULL;
Oscar Mateo454afeb2014-07-24 17:04:22 +01001896}
1897
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001898static void
Chris Wilsone1382ef2016-05-06 15:40:20 +01001899logical_ring_default_vfuncs(struct intel_engine_cs *engine)
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001900{
1901 /* Default vfuncs which can be overriden by each engine. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001902 engine->init_hw = gen8_init_common_ring;
1903 engine->emit_request = gen8_emit_request;
1904 engine->emit_flush = gen8_emit_flush;
Chris Wilson31bb59c2016-07-01 17:23:27 +01001905 engine->irq_enable = gen8_logical_ring_enable_irq;
1906 engine->irq_disable = gen8_logical_ring_disable_irq;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001907 engine->emit_bb_start = gen8_emit_bb_start;
Chris Wilson1b7744e2016-07-01 17:23:17 +01001908 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001909 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001910}
1911
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001912static inline void
Dave Gordonc2c7f242016-07-13 16:03:35 +01001913logical_ring_default_irqs(struct intel_engine_cs *engine)
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001914{
Dave Gordonc2c7f242016-07-13 16:03:35 +01001915 unsigned shift = engine->irq_shift;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001916 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1917 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001918}
1919
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001920static int
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001921lrc_setup_hws(struct intel_engine_cs *engine,
1922 struct drm_i915_gem_object *dctx_obj)
1923{
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001924 void *hws;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001925
1926 /* The HWSP is part of the default context object in LRC mode. */
1927 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
1928 LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001929 hws = i915_gem_object_pin_map(dctx_obj);
1930 if (IS_ERR(hws))
1931 return PTR_ERR(hws);
1932 engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001933 engine->status_page.obj = dctx_obj;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001934
1935 return 0;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001936}
1937
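/*
 * Illustrative sketch: in LRC mode the hardware status page lives inside
 * the default context object, LRC_PPHWSP_PN pages from its start, so the
 * GGTT address and the CPU mapping above are offset by the same amount
 * (4K pages assumed here):
 */
static void *example_hws_cpu_addr(void *ctx_map, unsigned long pphwsp_pn)
{
	/* Same arithmetic as status_page.page_addr above. */
	return (char *)ctx_map + pphwsp_pn * 4096;
}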
Tvrtko Ursulinbb454382016-07-13 16:03:36 +01001938static void
1939logical_ring_setup(struct intel_engine_cs *engine)
1940{
1941 struct drm_i915_private *dev_priv = engine->i915;
1942 enum forcewake_domains fw_domains;
1943
Tvrtko Ursulin019bf272016-07-13 16:03:41 +01001944 intel_engine_setup_common(engine);
1945
Tvrtko Ursulinbb454382016-07-13 16:03:36 +01001946 /* Intentionally left blank. */
1947 engine->buffer = NULL;
1948
1949 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1950 RING_ELSP(engine),
1951 FW_REG_WRITE);
1952
1953 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1954 RING_CONTEXT_STATUS_PTR(engine),
1955 FW_REG_READ | FW_REG_WRITE);
1956
1957 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1958 RING_CONTEXT_STATUS_BUF_BASE(engine),
1959 FW_REG_READ);
1960
1961 engine->fw_domains = fw_domains;
1962
Tvrtko Ursulinbb454382016-07-13 16:03:36 +01001963 tasklet_init(&engine->irq_tasklet,
1964 intel_lrc_irq_handler, (unsigned long)engine);
1965
1966 logical_ring_init_platform_invariants(engine);
1967 logical_ring_default_vfuncs(engine);
1968 logical_ring_default_irqs(engine);
Tvrtko Ursulinbb454382016-07-13 16:03:36 +01001969}
1970
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01001971static int
1972logical_ring_init(struct intel_engine_cs *engine)
1973{
1974 struct i915_gem_context *dctx = engine->i915->kernel_context;
1975 int ret;
1976
Tvrtko Ursulin019bf272016-07-13 16:03:41 +01001977 ret = intel_engine_init_common(engine);
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01001978 if (ret)
1979 goto error;
1980
1981 ret = execlists_context_deferred_alloc(dctx, engine);
1982 if (ret)
1983 goto error;
1984
1985 /* As this is the default context, always pin it */
1986 ret = intel_lr_context_pin(dctx, engine);
1987 if (ret) {
1988 DRM_ERROR("Failed to pin context for %s: %d\n",
1989 engine->name, ret);
1990 goto error;
1991 }
1992
1993 /* And setup the hardware status page. */
1994 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
1995 if (ret) {
1996 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
1997 goto error;
1998 }
1999
2000 return 0;
2001
2002error:
2003 intel_logical_ring_cleanup(engine);
2004 return ret;
2005}
2006
Tvrtko Ursulin88d2ba22016-07-13 16:03:40 +01002007int logical_render_ring_init(struct intel_engine_cs *engine)
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002008{
2009 struct drm_i915_private *dev_priv = engine->i915;
2010 int ret;
2011
Tvrtko Ursulinbb454382016-07-13 16:03:36 +01002012 logical_ring_setup(engine);
2013
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002014 if (HAS_L3_DPF(dev_priv))
2015 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2016
2017 /* Override some for render ring. */
2018 if (INTEL_GEN(dev_priv) >= 9)
2019 engine->init_hw = gen9_init_render_ring;
2020 else
2021 engine->init_hw = gen8_init_render_ring;
2022 engine->init_context = gen8_init_rcs_context;
2023 engine->cleanup = intel_fini_pipe_control;
2024 engine->emit_flush = gen8_emit_flush_render;
2025 engine->emit_request = gen8_emit_request_render;
2026
Chris Wilson7d5ea802016-07-01 17:23:20 +01002027 ret = intel_init_pipe_control(engine, 4096);
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002028 if (ret)
2029 return ret;
2030
2031 ret = intel_init_workaround_bb(engine);
2032 if (ret) {
2033 /*
	2034	 * We continue even if we fail to initialize the WA batch
	2035	 * because we only expect rare glitches but nothing
	2036	 * critical enough to prevent us from using the GPU.
2037 */
2038 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2039 ret);
2040 }
2041
2042 ret = logical_ring_init(engine);
2043 if (ret) {
2044 lrc_destroy_wa_ctx_obj(engine);
2045 }
2046
2047 return ret;
2048}
2049
Tvrtko Ursulin88d2ba22016-07-13 16:03:40 +01002050int logical_xcs_ring_init(struct intel_engine_cs *engine)
Tvrtko Ursulinbb454382016-07-13 16:03:36 +01002051{
2052 logical_ring_setup(engine);
2053
2054 return logical_ring_init(engine);
2055}
2056
Jeff McGee0cea6502015-02-13 10:27:56 -06002057static u32
Chris Wilsonc0336662016-05-06 15:40:21 +01002058make_rpcs(struct drm_i915_private *dev_priv)
Jeff McGee0cea6502015-02-13 10:27:56 -06002059{
2060 u32 rpcs = 0;
2061
2062 /*
2063 * No explicit RPCS request is needed to ensure full
2064 * slice/subslice/EU enablement prior to Gen9.
2065 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002066 if (INTEL_GEN(dev_priv) < 9)
Jeff McGee0cea6502015-02-13 10:27:56 -06002067 return 0;
2068
2069 /*
2070 * Starting in Gen9, render power gating can leave
2071 * slice/subslice/EU in a partially enabled state. We
2072 * must make an explicit request through RPCS for full
2073 * enablement.
2074 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002075 if (INTEL_INFO(dev_priv)->has_slice_pg) {
Jeff McGee0cea6502015-02-13 10:27:56 -06002076 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
Chris Wilsonc0336662016-05-06 15:40:21 +01002077 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002078 GEN8_RPCS_S_CNT_SHIFT;
2079 rpcs |= GEN8_RPCS_ENABLE;
2080 }
2081
Chris Wilsonc0336662016-05-06 15:40:21 +01002082 if (INTEL_INFO(dev_priv)->has_subslice_pg) {
Jeff McGee0cea6502015-02-13 10:27:56 -06002083 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
Chris Wilsonc0336662016-05-06 15:40:21 +01002084 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002085 GEN8_RPCS_SS_CNT_SHIFT;
2086 rpcs |= GEN8_RPCS_ENABLE;
2087 }
2088
Chris Wilsonc0336662016-05-06 15:40:21 +01002089 if (INTEL_INFO(dev_priv)->has_eu_pg) {
2090 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002091 GEN8_RPCS_EU_MIN_SHIFT;
Chris Wilsonc0336662016-05-06 15:40:21 +01002092 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002093 GEN8_RPCS_EU_MAX_SHIFT;
2094 rpcs |= GEN8_RPCS_ENABLE;
2095 }
2096
2097 return rpcs;
2098}
2099
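/*
 * Illustrative, simplified sketch of the RPCS packing above: each
 * power-gating capability contributes a count at a field-specific shift
 * plus an enable bit. The shifts and bits below are invented stand-ins
 * for the GEN8_RPCS_* macros, and the per-field count-enable bits are
 * omitted for brevity:
 */
static uint32_t example_make_rpcs(int has_slice_pg, uint32_t slices,
				  int has_subslice_pg, uint32_t subslices,
				  int has_eu_pg, uint32_t eu_per_subslice)
{
	uint32_t rpcs = 0;

	if (has_slice_pg)
		rpcs |= (slices << 15) | (1u << 31);	/* stand-in shifts */
	if (has_subslice_pg)
		rpcs |= (subslices << 8) | (1u << 31);
	if (has_eu_pg)				/* EU min == EU max == full */
		rpcs |= (eu_per_subslice << 0) |
			(eu_per_subslice << 4) | (1u << 31);

	return rpcs;
}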
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002100static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
Michel Thierry71562912016-02-23 10:31:49 +00002101{
2102 u32 indirect_ctx_offset;
2103
Chris Wilsonc0336662016-05-06 15:40:21 +01002104 switch (INTEL_GEN(engine->i915)) {
Michel Thierry71562912016-02-23 10:31:49 +00002105 default:
Chris Wilsonc0336662016-05-06 15:40:21 +01002106 MISSING_CASE(INTEL_GEN(engine->i915));
Michel Thierry71562912016-02-23 10:31:49 +00002107 /* fall through */
2108 case 9:
2109 indirect_ctx_offset =
2110 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2111 break;
2112 case 8:
2113 indirect_ctx_offset =
2114 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2115 break;
2116 }
2117
2118 return indirect_ctx_offset;
2119}
2120
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002121static int
Chris Wilsone2efd132016-05-24 14:53:34 +01002122populate_lr_context(struct i915_gem_context *ctx,
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002123 struct drm_i915_gem_object *ctx_obj,
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002124 struct intel_engine_cs *engine,
Chris Wilson7e37f882016-08-02 22:50:21 +01002125 struct intel_ring *ring)
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002126{
Chris Wilsonc0336662016-05-06 15:40:21 +01002127 struct drm_i915_private *dev_priv = ctx->i915;
Daniel Vetterae6c4802014-08-06 15:04:53 +02002128 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002129 void *vaddr;
2130 u32 *reg_state;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002131 int ret;
2132
Thomas Daniel2d965532014-08-19 10:13:36 +01002133 if (!ppgtt)
2134 ppgtt = dev_priv->mm.aliasing_ppgtt;
2135
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002136 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2137 if (ret) {
2138 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2139 return ret;
2140 }
2141
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002142 vaddr = i915_gem_object_pin_map(ctx_obj);
2143 if (IS_ERR(vaddr)) {
2144 ret = PTR_ERR(vaddr);
2145 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002146 return ret;
2147 }
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002148 ctx_obj->dirty = true;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002149
2150 /* The second page of the context object contains some fields which must
2151 * be set up prior to the first execution. */
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002152 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002153
2154 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2155 * commands followed by (reg, value) pairs. The values we are setting here are
2156 * only for the first context restore: on a subsequent save, the GPU will
2157 * recreate this batchbuffer with new values (including all the missing
2158 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	reg_state[CTX_LRI_HEADER_0] =
		MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
		       RING_CONTEXT_CONTROL(engine),
		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					  (HAS_RESOURCE_STREAMER(dev_priv) ?
					   CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
	/* Ring buffer start address is not known until the buffer is pinned.
	 * It is written to the context image in execlists_update_context()
	 */
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
		       RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
		       RING_SBBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
		       RING_SBBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
		       RING_SBBSTATE(engine->mmio_base), 0);
	if (engine->id == RCS) {
		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
			       RING_INDIRECT_CTX(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
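		/*
		 * The encodings below follow from the assignments themselves:
		 * the low bits of the INDIRECT_CTX pointer carry the batch
		 * size in cachelines, while BB_PER_CTX_PTR is or'ed with 0x01
		 * (presumably a valid bit from the hardware's point of view).
		 */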
		if (engine->wa_ctx.obj) {
			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);

			reg_state[CTX_RCS_INDIRECT_CTX+1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
				intel_lr_indirect_ctx_offset(engine) << 6;

			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
		}
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
		       0);

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address of the PML4, and
		 * the other PDP descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/* 32b PPGTT
		 * PDP*_DESCRIPTOR contains the base address of the space
		 * supported. With dynamic page allocation, PDPs may not be
		 * allocated at this point; point the unallocated PDPs to the
		 * scratch page.
		 */
		execlists_update_context_pdps(ppgtt, reg_state);
	}

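	/*
	 * R_PWR_CLK_STATE only exists in the render context image; the value
	 * programmed here is the slice/subslice/EU configuration computed by
	 * make_rpcs().
	 */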
	if (engine->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
			       make_rpcs(dev_priv));
	}

	i915_gem_object_unpin_map(ctx_obj);

	return 0;
}

/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
	int ret = 0;

	WARN_ON(INTEL_GEN(engine->i915) < 8);

	switch (engine->id) {
	case RCS:
		if (INTEL_GEN(engine->i915) >= 9)
			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
		else
			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

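/*
 * Contexts are allocated lazily: nothing is created until a context is
 * actually needed on a given engine, at which point this function builds
 * the backing object and ringbuffer and populates the initial state.
 */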
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct intel_context *ce = &ctx->engine[engine->id];
	uint32_t context_size;
	struct intel_ring *ring;
	int ret;

	WARN_ON(ce->state);

	context_size = round_up(intel_lr_context_size(engine), 4096);

	/* One extra page for the data shared between the driver and the GuC */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
	if (IS_ERR(ctx_obj)) {
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
		return PTR_ERR(ctx_obj);
	}

	ring = intel_engine_create_ring(engine, ctx->ring_size);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ring_free;
	}

	ce->ring = ring;
	ce->state = ctx_obj;
	ce->initialised = engine->init_context == NULL;

	return 0;

error_ring_free:
	intel_ring_free(ring);
error_deref_obj:
	i915_gem_object_put(ctx_obj);
	ce->ring = NULL;
	ce->state = NULL;
	return ret;
}

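/*
 * After a GPU reset the ring registers saved in each context image may
 * point at stale work; rewind both the image (head/tail registers) and
 * the driver's software bookkeeping to the start of the ring.
 */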
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
			    struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct intel_context *ce = &ctx->engine[engine->id];
		struct drm_i915_gem_object *ctx_obj = ce->state;
		void *vaddr;
		uint32_t *reg_state;

		if (!ctx_obj)
			continue;

		vaddr = i915_gem_object_pin_map(ctx_obj);
		if (WARN_ON(IS_ERR(vaddr)))
			continue;

		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
		ctx_obj->dirty = true;

		reg_state[CTX_RING_HEAD+1] = 0;
		reg_state[CTX_RING_TAIL+1] = 0;

		i915_gem_object_unpin_map(ctx_obj);

		ce->ring->head = 0;
		ce->ring->tail = 0;
	}
}