/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

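/*
 * Size, in whole pages, of the per-engine backing object that holds a
 * logical ring context image (per-process HWSP plus register state); the
 * render context is considerably larger than the other engines'.
 */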
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

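/* Fields of the per-engine execlist status register (RING_EXECLIST_STATUS_LO/HI). */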
#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

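/* Event bits reported in each Context Status Buffer (CSB) entry. */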
#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

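/*
 * Dword offsets of the MI_LOAD_REGISTER_IMM headers and the register/value
 * pairs within the register state page of the logical ring context image.
 */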
#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

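/*
 * Flag bits living in the low 12 bits of the context descriptor (cached per
 * engine in ctx_desc_template, see intel_lr_context_descriptor_update()).
 */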
#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)
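/*
 * Typical use when refreshing a 32b PPGTT context image (see
 * execlists_update_context_pdps() below):
 *
 *	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
 *	...
 *	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 *
 * With a full 48b PPGTT, ASSIGN_CTX_PML4() instead writes the single PML4
 * address into the PDP0 slots.
 */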

enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_32B_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
		LEGACY_64B_CONTEXT :\
		LEGACY_32B_CONTEXT)
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
	if (IS_GEN8(dev_priv))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 * for a pinned context
 *
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB:
 *    bits 0-11:  flags, GEN8_CTX_* (cached in ctx_desc_template)
 *    bits 12-31: LRCA, GTT address of (the HWSP of) this context
 *    bits 32-52: ctx ID, a globally unique tag
 *    bits 53-54: mbz, reserved for use by hardware
 *    bits 55-63: group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = engine->ctx_desc_template;		/* bits  0-11 */
	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
							/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */

	ce->lrc_desc = desc;
}

uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{

	struct intel_engine_cs *engine = rq0->engine;
	struct drm_i915_private *dev_priv = rq0->i915;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

	/* ELSP is a wo register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;
	unsigned int fw_domains = rq0->engine->fw_domains;

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->i915));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			i915_gem_request_unreference(req0);
			req0 = cursor;
		} else {
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		struct intel_ringbuffer *ringbuf;

		ringbuf = req0->ctx->engine[engine->id].ringbuf;
		req0->tail += 8;
		req0->tail &= ringbuf->size - 1;
	}

	execlists_submit_requests(req0, req1);
}

static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	list_del(&head_req->execlist_link);
	i915_gem_request_unreference(head_req);

	return 1;
}

static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/**
 * intel_lrc_irq_handler() - handle Context Switch interrupts
 * @data: engine pointer, passed by the tasklet as an unsigned long
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		if (!engine->disable_lite_restore_wa ||
		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_context_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}

static void execlists_context_queue(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct drm_i915_gem_request *cursor;
	int num_elements = 0;

	spin_lock_bh(&engine->execlist_lock);

	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

	if (num_elements > 2) {
		struct drm_i915_gem_request *tail_req;

		tail_req = list_last_entry(&engine->execlist_queue,
					   struct drm_i915_gem_request,
					   execlist_link);

		if (request->ctx == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
				"More than 2 already-submitted reqs queued\n");
			list_del(&tail_req->execlist_link);
			i915_gem_request_unreference(tail_req);
		}
	}

	i915_gem_request_reference(request);
	list_add_tail(&request->execlist_link, &engine->execlist_queue);
	request->ctx_hw_id = request->ctx->hw_id;
	if (num_elements == 0)
		execlists_context_unqueue(engine);

	spin_unlock_bh(&engine->execlist_lock);
}

static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (engine->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
{
	const unsigned other_rings = ~intel_engine_flag(req->engine);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->engine, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(req);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_context *ce = &request->ctx->engine[engine->id];
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	if (!ce->state) {
		ret = execlists_context_deferred_alloc(request->ctx, engine);
		if (ret)
			return ret;
	}

	request->ringbuf = ce->ringbuf;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		ret = i915_guc_wq_check_space(request);
		if (ret)
			return ret;
	}

	ret = intel_lr_context_pin(request->ctx, engine);
	if (ret)
		return ret;

	ret = intel_ring_begin(request, 0);
	if (ret)
		goto err_unpin;

	if (!ce->initialised) {
		ret = engine->init_context(request);
		if (ret)
			goto err_unpin;

		ce->initialised = true;
	}

	/* Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;

err_unpin:
	intel_lr_context_unpin(request->ctx, engine);
	return ret;
}

/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	struct intel_engine_cs *engine = request->engine;

	intel_logical_ring_advance(ringbuf);
	request->tail = ringbuf->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	if (intel_engine_stopped(engine))
		return 0;

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	request->previous_context = engine->last_context;
	engine->last_context = request->ctx;

	if (i915.enable_guc_submission)
		i915_guc_submit(request);
	else
		execlists_context_queue(request);

	return 0;
}

/**
 * execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @params: execbuffer call parameters.
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
 * away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */
int intel_execlists_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *engine = params->engine;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
	u64 exec_start;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	if (engine == &dev_priv->engine[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit_reg(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	exec_start = params->batch_obj_vm_offset +
		     args->batch_start_offset;

	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req, *tmp;
	LIST_HEAD(cancel_list);

	WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));

	spin_lock_bh(&engine->execlist_lock);
	list_replace_init(&engine->execlist_queue, &cancel_list);
	spin_unlock_bh(&engine->execlist_lock);

	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
		list_del(&req->execlist_link);
		i915_gem_request_unreference(req);
	}
}

void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
		return;
	}
	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (!engine->gpu_caches_dirty)
		return 0;

	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct intel_context *ce = &ctx->engine[engine->id];
	void *vaddr;
	u32 *lrc_reg_state;
	int ret;

	lockdep_assert_held(&ctx->i915->dev->struct_mutex);

	if (ce->pin_count++)
		return 0;

	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_ctx_obj;
	}

	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
	if (ret)
		goto unpin_map;

	i915_gem_context_reference(ctx);
	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
	intel_lr_context_descriptor_update(ctx, engine);

	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
	ce->lrc_reg_state = lrc_reg_state;
	ce->state->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission)
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
	i915_gem_object_ggtt_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	intel_unpin_ringbuffer_obj(ce->ringbuf);

	i915_gem_object_unpin_map(ce->state);
	i915_gem_object_ggtt_unpin(ce->state);

	ce->lrc_vma = NULL;
	ce->lrc_desc = 0;
	ce->lrc_reg_state = NULL;

	i915_gem_context_unreference(ctx);
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *engine = req->engine;
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	struct i915_workarounds *w = &req->i915->workarounds;

	if (w->count == 0)
		return 0;

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
		intel_logical_ring_emit(ringbuf, w->reg[i].value);
	}
	intel_logical_ring_emit(ringbuf, MI_NOOP);

	intel_logical_ring_advance(ringbuf);

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	return 0;
}

Arun Siluvery83b8a982015-07-08 10:27:05 +01001043#define wa_ctx_emit(batch, index, cmd) \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001044 do { \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001045 int __index = (index)++; \
1046 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001047 return -ENOSPC; \
1048 } \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001049 batch[__index] = (cmd); \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001050 } while (0)
1051
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001052#define wa_ctx_emit_reg(batch, index, reg) \
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001053 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
Arun Siluvery9e000842015-07-03 14:27:31 +01001054
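/*
 * A minimal usage sketch (illustrative only, not part of the driver): the
 * batch builders below grow a workaround batch one dword at a time, e.g. a
 * single LRI write of an arbitrary value into an arbitrary register is
 *
 *	uint32_t index = 0;
 *
 *	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 *	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 *	wa_ctx_emit(batch, index, value);
 *
 * The macros advance @index themselves and make the enclosing function
 * return -ENOSPC if the batch would overflow the page.
 */
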
1055/*
1056 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 1057 * PIPE_CONTROL instruction. This is required for the flush to happen correctly,
 1058 * but there is a slight complication: this is applied in a WA batch where the
 1059 * values are only initialized once, so we cannot read the register value at the
 1060 * beginning and reuse it further; hence we save its value to memory, upload a
 1061 * constant value with bit21 set and then restore it with the saved value.
 1062 * To simplify the WA, the constant value is formed from the default value
 1063 * of this register. This shouldn't be a problem because we only modify
 1064 * it for a short period and this batch is non-preemptible. We could of course
 1065 * use additional instructions that read the actual value of the register
 1066 * at that time and set our bit of interest, but that makes the WA more complicated.
 1067 *
 1068 * This WA is also required for Gen9, so extracting it as a function avoids
1069 * code duplication.
1070 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001071static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
Arun Siluvery9e000842015-07-03 14:27:31 +01001072 uint32_t *const batch,
1073 uint32_t index)
1074{
1075 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1076
Arun Siluverya4106a72015-07-14 15:01:29 +01001077 /*
Mika Kuoppalafe905812016-06-07 17:19:03 +03001078 * WaDisableLSQCROPERFforOCL:skl,kbl
Arun Siluverya4106a72015-07-14 15:01:29 +01001079 * This WA is implemented in skl_init_clock_gating() but since
1080 * this batch updates GEN8_L3SQCREG4 with default value we need to
1081 * set this bit here to retain the WA during flush.
1082 */
Mika Kuoppalafe905812016-06-07 17:19:03 +03001083 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
1084 IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
Arun Siluverya4106a72015-07-14 15:01:29 +01001085 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1086
Arun Siluveryf1afe242015-08-04 16:22:20 +01001087 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001088 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001089 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001090 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001091 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001092
Arun Siluvery83b8a982015-07-08 10:27:05 +01001093 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001094 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001095 wa_ctx_emit(batch, index, l3sqc4_flush);
Arun Siluvery9e000842015-07-03 14:27:31 +01001096
Arun Siluvery83b8a982015-07-08 10:27:05 +01001097 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1098 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1099 PIPE_CONTROL_DC_FLUSH_ENABLE));
1100 wa_ctx_emit(batch, index, 0);
1101 wa_ctx_emit(batch, index, 0);
1102 wa_ctx_emit(batch, index, 0);
1103 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001104
Arun Siluveryf1afe242015-08-04 16:22:20 +01001105 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001106 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001107 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001108 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001109 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001110
1111 return index;
1112}
1113
Arun Siluvery17ee9502015-06-19 19:07:01 +01001114static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1115 uint32_t offset,
1116 uint32_t start_alignment)
1117{
1118 return wa_ctx->offset = ALIGN(offset, start_alignment);
1119}
1120
1121static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1122 uint32_t offset,
1123 uint32_t size_alignment)
1124{
1125 wa_ctx->size = offset - wa_ctx->offset;
1126
1127 WARN(wa_ctx->size % size_alignment,
1128 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1129 wa_ctx->size, size_alignment);
1130 return 0;
1131}
1132
1133/**
1134 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1135 *
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001136 * @engine: only applicable for RCS
Arun Siluvery17ee9502015-06-19 19:07:01 +01001137 * @wa_ctx: structure representing wa_ctx
 1138 * @batch: page in which WA are loaded
 1139 * @offset: This field specifies the start of the batch; it should be
 1140 * cache-aligned, otherwise it is adjusted accordingly. It is updated with
 1141 * the offset value received as input.
 1142 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 1143 * initialized at the beginning and shared across all contexts, but this field
 1144 * helps us to have multiple batches at different offsets and select them based
 1145 * on some criterion. At the moment this batch always starts at the beginning of
 1146 * the page and at this point we don't have multiple wa_ctx batch buffers.
 1147 *
 1148 * The size of the batch is specified in DWORDS, but the HW expects it in terms
 1149 * of cachelines. The number of WA applied is not known at the beginning; we use
 1150 * this field to return the number of DWORDS written.
Arun Siluvery4d78c8d2015-06-23 15:50:43 +01001152 *
Arun Siluvery17ee9502015-06-19 19:07:01 +01001153 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1154 * so it adds NOOPs as padding to make it cacheline aligned.
 1155 * MI_BATCH_BUFFER_END will be added to the perctx batch and both of them together
 1156 * make a complete batch buffer.
1157 *
1158 * Return: non-zero if we exceed the PAGE_SIZE limit.
1159 */
1160
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001161static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001162 struct i915_wa_ctx_bb *wa_ctx,
1163 uint32_t *const batch,
1164 uint32_t *offset)
1165{
Arun Siluvery0160f052015-06-23 15:46:57 +01001166 uint32_t scratch_addr;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001167 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1168
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001169 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001170 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001171
Arun Siluveryc82435b2015-06-19 18:37:13 +01001172 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
Chris Wilsonc0336662016-05-06 15:40:21 +01001173 if (IS_BROADWELL(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001174 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Andrzej Hajda604ef732015-09-21 15:33:35 +02001175 if (rc < 0)
1176 return rc;
1177 index = rc;
Arun Siluveryc82435b2015-06-19 18:37:13 +01001178 }
1179
Arun Siluvery0160f052015-06-23 15:46:57 +01001180 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1181 /* Actual scratch location is at 128 bytes offset */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001182 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
Arun Siluvery0160f052015-06-23 15:46:57 +01001183
Arun Siluvery83b8a982015-07-08 10:27:05 +01001184 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1185 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1186 PIPE_CONTROL_GLOBAL_GTT_IVB |
1187 PIPE_CONTROL_CS_STALL |
1188 PIPE_CONTROL_QW_WRITE));
1189 wa_ctx_emit(batch, index, scratch_addr);
1190 wa_ctx_emit(batch, index, 0);
1191 wa_ctx_emit(batch, index, 0);
1192 wa_ctx_emit(batch, index, 0);
Arun Siluvery0160f052015-06-23 15:46:57 +01001193
Arun Siluvery17ee9502015-06-19 19:07:01 +01001194 /* Pad to end of cacheline */
1195 while (index % CACHELINE_DWORDS)
Arun Siluvery83b8a982015-07-08 10:27:05 +01001196 wa_ctx_emit(batch, index, MI_NOOP);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001197
1198 /*
1199 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1200 * execution depends on the length specified in terms of cache lines
1201 * in the register CTX_RCS_INDIRECT_CTX
1202 */
1203
1204 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1205}
1206
1207/**
1208 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1209 *
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001210 * @engine: only applicable for RCS
Arun Siluvery17ee9502015-06-19 19:07:01 +01001211 * @wa_ctx: structure representing wa_ctx
Arun Siluvery4d78c8d2015-06-23 15:50:43 +01001214 * @batch: page in which WA are loaded
Arun Siluvery17ee9502015-06-19 19:07:01 +01001215 * @offset: This field specifies the start of this batch; it should be cache-aligned.
1216 * This batch is started immediately after indirect_ctx batch. Since we ensure
1217 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
1218 *
 1219 * The number of DWORDS written is returned using this field.
1220 *
1221 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1222 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1223 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001224static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001225 struct i915_wa_ctx_bb *wa_ctx,
1226 uint32_t *const batch,
1227 uint32_t *offset)
1228{
1229 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1230
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001231 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001232 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001233
Arun Siluvery83b8a982015-07-08 10:27:05 +01001234 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001235
1236 return wa_ctx_end(wa_ctx, *offset = index, 1);
1237}
1238
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001239static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001240 struct i915_wa_ctx_bb *wa_ctx,
1241 uint32_t *const batch,
1242 uint32_t *offset)
1243{
Arun Siluverya4106a72015-07-14 15:01:29 +01001244 int ret;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001245 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1246
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001247 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001248 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1249 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001250 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery0504cff2015-07-14 15:01:27 +01001251
Arun Siluverya4106a72015-07-14 15:01:29 +01001252 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001253 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Arun Siluverya4106a72015-07-14 15:01:29 +01001254 if (ret < 0)
1255 return ret;
1256 index = ret;
1257
Arun Siluvery0504cff2015-07-14 15:01:27 +01001258 /* Pad to end of cacheline */
1259 while (index % CACHELINE_DWORDS)
1260 wa_ctx_emit(batch, index, MI_NOOP);
1261
1262 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1263}
1264
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001265static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001266 struct i915_wa_ctx_bb *wa_ctx,
1267 uint32_t *const batch,
1268 uint32_t *offset)
1269{
1270 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1271
Arun Siluvery9b014352015-07-14 15:01:30 +01001272 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001273 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1274 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
Arun Siluvery9b014352015-07-14 15:01:30 +01001275 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001276 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
Arun Siluvery9b014352015-07-14 15:01:30 +01001277 wa_ctx_emit(batch, index,
1278 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1279 wa_ctx_emit(batch, index, MI_NOOP);
1280 }
1281
Tim Goreb1e429f2016-03-21 14:37:29 +00001282 /* WaClearTdlStateAckDirtyBits:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001283 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
Tim Goreb1e429f2016-03-21 14:37:29 +00001284 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1285
1286 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1287 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1288
1289 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1290 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1291
1292 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1293 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1294
1295 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1296 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1297 wa_ctx_emit(batch, index, 0x0);
1298 wa_ctx_emit(batch, index, MI_NOOP);
1299 }
1300
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001301 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001302 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1303 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001304 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1305
Arun Siluvery0504cff2015-07-14 15:01:27 +01001306 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1307
1308 return wa_ctx_end(wa_ctx, *offset = index, 1);
1309}
1310
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001311static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001312{
1313 int ret;
1314
Chris Wilsonc0336662016-05-06 15:40:21 +01001315 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001316 PAGE_ALIGN(size));
Chris Wilsonfe3db792016-04-25 13:32:13 +01001317 if (IS_ERR(engine->wa_ctx.obj)) {
Arun Siluvery17ee9502015-06-19 19:07:01 +01001318 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
Chris Wilsonfe3db792016-04-25 13:32:13 +01001319 ret = PTR_ERR(engine->wa_ctx.obj);
1320 engine->wa_ctx.obj = NULL;
1321 return ret;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001322 }
1323
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001324 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001325 if (ret) {
1326 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1327 ret);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001328 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001329 return ret;
1330 }
1331
1332 return 0;
1333}
1334
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001335static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001336{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001337 if (engine->wa_ctx.obj) {
1338 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1339 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1340 engine->wa_ctx.obj = NULL;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001341 }
1342}
1343
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001344static int intel_init_workaround_bb(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001345{
1346 int ret;
1347 uint32_t *batch;
1348 uint32_t offset;
1349 struct page *page;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001350 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001351
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001352 WARN_ON(engine->id != RCS);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001353
Arun Siluvery5e60d792015-06-23 15:50:44 +01001354 /* update this when WA for higher Gen are added */
Chris Wilsonc0336662016-05-06 15:40:21 +01001355 if (INTEL_GEN(engine->i915) > 9) {
Arun Siluvery0504cff2015-07-14 15:01:27 +01001356 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
Chris Wilsonc0336662016-05-06 15:40:21 +01001357 INTEL_GEN(engine->i915));
Arun Siluvery5e60d792015-06-23 15:50:44 +01001358 return 0;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001359 }
Arun Siluvery5e60d792015-06-23 15:50:44 +01001360
Arun Siluveryc4db7592015-06-19 18:37:11 +01001361 /* some WA perform writes to scratch page, ensure it is valid */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001362 if (engine->scratch.obj == NULL) {
1363 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
Arun Siluveryc4db7592015-06-19 18:37:11 +01001364 return -EINVAL;
1365 }
1366
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001367 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001368 if (ret) {
1369 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1370 return ret;
1371 }
1372
Dave Gordon033908a2015-12-10 18:51:23 +00001373 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001374 batch = kmap_atomic(page);
1375 offset = 0;
1376
Chris Wilsonc0336662016-05-06 15:40:21 +01001377 if (IS_GEN8(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001378 ret = gen8_init_indirectctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001379 &wa_ctx->indirect_ctx,
1380 batch,
1381 &offset);
1382 if (ret)
1383 goto out;
1384
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001385 ret = gen8_init_perctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001386 &wa_ctx->per_ctx,
1387 batch,
1388 &offset);
1389 if (ret)
1390 goto out;
Chris Wilsonc0336662016-05-06 15:40:21 +01001391 } else if (IS_GEN9(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001392 ret = gen9_init_indirectctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001393 &wa_ctx->indirect_ctx,
1394 batch,
1395 &offset);
1396 if (ret)
1397 goto out;
1398
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001399 ret = gen9_init_perctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001400 &wa_ctx->per_ctx,
1401 batch,
1402 &offset);
1403 if (ret)
1404 goto out;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001405 }
1406
1407out:
1408 kunmap_atomic(batch);
1409 if (ret)
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001410 lrc_destroy_wa_ctx_obj(engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001411
1412 return ret;
1413}
1414
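/*
 * In LRC mode the hardware status page lives in the PPHWSP of the default
 * context (see lrc_setup_hws() below); this helper only points RING_HWS_PGA
 * at that page.
 */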
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001415static void lrc_init_hws(struct intel_engine_cs *engine)
1416{
Chris Wilsonc0336662016-05-06 15:40:21 +01001417 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001418
1419 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1420 (u32)engine->status_page.gfx_addr);
1421 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1422}
1423
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001424static int gen8_init_common_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001425{
Chris Wilsonc0336662016-05-06 15:40:21 +01001426 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulinc6a2ac72016-02-26 16:58:32 +00001427 unsigned int next_context_status_buffer_hw;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001428
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001429 lrc_init_hws(engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01001430
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001431 I915_WRITE_IMR(engine,
1432 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1433 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001434
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001435 I915_WRITE(RING_MODE_GEN7(engine),
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001436 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1437 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001438 POSTING_READ(RING_MODE_GEN7(engine));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001439
1440 /*
1441 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1442 * zero, we need to read the write pointer from hardware and use its
1443 * value because "this register is power context save restored".
1444 * Effectively, these states have been observed:
1445 *
1446 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1447 * BDW | CSB regs not reset | CSB regs reset |
1448 * CHT | CSB regs not reset | CSB regs not reset |
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001449 * SKL | ? | ? |
1450 * BXT | ? | ? |
Michel Thierrydfc53c52015-09-28 13:25:12 +01001451 */
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001452 next_context_status_buffer_hw =
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001453 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001454
1455 /*
1456 * When the CSB registers are reset (also after power-up / gpu reset),
1457 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1458 * this special case, so the first element read is CSB[0].
1459 */
1460 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1461 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1462
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001463 engine->next_context_status_buffer = next_context_status_buffer_hw;
1464 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001465
Tomas Elffc0768c2016-03-21 16:26:59 +00001466 intel_engine_init_hangcheck(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001467
Peter Antoine0ccdacf2016-04-13 15:03:25 +01001468 return intel_mocs_init_engine(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001469}
1470
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001471static int gen8_init_render_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001472{
Chris Wilsonc0336662016-05-06 15:40:21 +01001473 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001474 int ret;
1475
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001476 ret = gen8_init_common_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001477 if (ret)
1478 return ret;
1479
1480 /* We need to disable the AsyncFlip performance optimisations in order
1481 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1482 * programmed to '1' on all products.
1483 *
1484 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1485 */
1486 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1487
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001488 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1489
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001490 return init_workarounds_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001491}
1492
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001493static int gen9_init_render_ring(struct intel_engine_cs *engine)
Damien Lespiau82ef8222015-02-09 19:33:08 +00001494{
1495 int ret;
1496
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001497 ret = gen8_init_common_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001498 if (ret)
1499 return ret;
1500
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001501 return init_workarounds_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001502}
1503
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001504static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1505{
1506 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001507 struct intel_engine_cs *engine = req->engine;
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001508 struct intel_ringbuffer *ringbuf = req->ringbuf;
1509 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1510 int i, ret;
1511
Chris Wilson987046a2016-04-28 09:56:46 +01001512 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001513 if (ret)
1514 return ret;
1515
1516 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1517 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1518 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1519
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001520 intel_logical_ring_emit_reg(ringbuf,
1521 GEN8_RING_PDP_UDW(engine, i));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001522 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001523 intel_logical_ring_emit_reg(ringbuf,
1524 GEN8_RING_PDP_LDW(engine, i));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001525 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1526 }
1527
1528 intel_logical_ring_emit(ringbuf, MI_NOOP);
1529 intel_logical_ring_advance(ringbuf);
1530
1531 return 0;
1532}
1533
John Harrisonbe795fc2015-05-29 17:44:03 +01001534static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
John Harrison8e004ef2015-02-13 11:48:10 +00001535 u64 offset, unsigned dispatch_flags)
Oscar Mateo15648582014-07-24 17:04:32 +01001536{
John Harrisonbe795fc2015-05-29 17:44:03 +01001537 struct intel_ringbuffer *ringbuf = req->ringbuf;
John Harrison8e004ef2015-02-13 11:48:10 +00001538 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
Oscar Mateo15648582014-07-24 17:04:32 +01001539 int ret;
1540
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001541	/* Don't rely on the hw updating PDPs, especially in lite-restore.
1542 * Ideally, we should set Force PD Restore in ctx descriptor,
1543 * but we can't. Force Restore would be a second option, but
1544 * it is unsafe in case of lite-restore (because the ctx is
Michel Thierry2dba3232015-07-30 11:06:23 +01001545 * not idle). PML4 is allocated during ppgtt init so this is
 1546	 * not needed in 48-bit. */
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001547 if (req->ctx->ppgtt &&
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001548 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001549 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
Chris Wilsonc0336662016-05-06 15:40:21 +01001550 !intel_vgpu_active(req->i915)) {
Michel Thierry2dba3232015-07-30 11:06:23 +01001551 ret = intel_logical_ring_emit_pdps(req);
1552 if (ret)
1553 return ret;
1554 }
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001555
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001556 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001557 }
1558
Chris Wilson987046a2016-04-28 09:56:46 +01001559 ret = intel_ring_begin(req, 4);
Oscar Mateo15648582014-07-24 17:04:32 +01001560 if (ret)
1561 return ret;
1562
1563 /* FIXME(BDW): Address space and security selectors. */
Abdiel Janulgue69225282015-06-16 13:39:42 +03001564 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1565 (ppgtt<<8) |
1566 (dispatch_flags & I915_DISPATCH_RS ?
1567 MI_BATCH_RESOURCE_STREAMER : 0));
Oscar Mateo15648582014-07-24 17:04:32 +01001568 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1569 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1570 intel_logical_ring_emit(ringbuf, MI_NOOP);
1571 intel_logical_ring_advance(ringbuf);
1572
1573 return 0;
1574}
1575
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001576static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001577{
Chris Wilsonc0336662016-05-06 15:40:21 +01001578 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo73d477f2014-07-24 17:04:31 +01001579 unsigned long flags;
1580
Daniel Vetter7cd512f2014-09-15 11:38:57 +02001581 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
Oscar Mateo73d477f2014-07-24 17:04:31 +01001582 return false;
1583
1584 spin_lock_irqsave(&dev_priv->irq_lock, flags);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001585 if (engine->irq_refcount++ == 0) {
1586 I915_WRITE_IMR(engine,
1587 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1588 POSTING_READ(RING_IMR(engine->mmio_base));
Oscar Mateo73d477f2014-07-24 17:04:31 +01001589 }
1590 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1591
1592 return true;
1593}
1594
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001595static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001596{
Chris Wilsonc0336662016-05-06 15:40:21 +01001597 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo73d477f2014-07-24 17:04:31 +01001598 unsigned long flags;
1599
1600 spin_lock_irqsave(&dev_priv->irq_lock, flags);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001601 if (--engine->irq_refcount == 0) {
1602 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1603 POSTING_READ(RING_IMR(engine->mmio_base));
Oscar Mateo73d477f2014-07-24 17:04:31 +01001604 }
1605 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1606}
1607
John Harrison7deb4d32015-05-29 17:43:59 +01001608static int gen8_emit_flush(struct drm_i915_gem_request *request,
Oscar Mateo47122742014-07-24 17:04:28 +01001609 u32 invalidate_domains,
1610 u32 unused)
1611{
John Harrison7deb4d32015-05-29 17:43:59 +01001612 struct intel_ringbuffer *ringbuf = request->ringbuf;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001613 struct intel_engine_cs *engine = ringbuf->engine;
Chris Wilsonc0336662016-05-06 15:40:21 +01001614 struct drm_i915_private *dev_priv = request->i915;
Oscar Mateo47122742014-07-24 17:04:28 +01001615 uint32_t cmd;
1616 int ret;
1617
Chris Wilson987046a2016-04-28 09:56:46 +01001618 ret = intel_ring_begin(request, 4);
Oscar Mateo47122742014-07-24 17:04:28 +01001619 if (ret)
1620 return ret;
1621
1622 cmd = MI_FLUSH_DW + 1;
1623
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001624 /* We always require a command barrier so that subsequent
1625 * commands, such as breadcrumb interrupts, are strictly ordered
1626 * wrt the contents of the write cache being flushed to memory
1627 * (and thus being coherent from the CPU).
1628 */
1629 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1630
1631 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1632 cmd |= MI_INVALIDATE_TLB;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001633 if (engine == &dev_priv->engine[VCS])
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001634 cmd |= MI_INVALIDATE_BSD;
Oscar Mateo47122742014-07-24 17:04:28 +01001635 }
1636
1637 intel_logical_ring_emit(ringbuf, cmd);
1638 intel_logical_ring_emit(ringbuf,
1639 I915_GEM_HWS_SCRATCH_ADDR |
1640 MI_FLUSH_DW_USE_GTT);
1641 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1642 intel_logical_ring_emit(ringbuf, 0); /* value */
1643 intel_logical_ring_advance(ringbuf);
1644
1645 return 0;
1646}
1647
John Harrison7deb4d32015-05-29 17:43:59 +01001648static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
Oscar Mateo47122742014-07-24 17:04:28 +01001649 u32 invalidate_domains,
1650 u32 flush_domains)
1651{
John Harrison7deb4d32015-05-29 17:43:59 +01001652 struct intel_ringbuffer *ringbuf = request->ringbuf;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001653 struct intel_engine_cs *engine = ringbuf->engine;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001654 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001655 bool vf_flush_wa = false, dc_flush_wa = false;
Oscar Mateo47122742014-07-24 17:04:28 +01001656 u32 flags = 0;
1657 int ret;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001658 int len;
Oscar Mateo47122742014-07-24 17:04:28 +01001659
1660 flags |= PIPE_CONTROL_CS_STALL;
1661
1662 if (flush_domains) {
1663 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1664 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
Francisco Jerez965fd602016-01-13 18:59:39 -08001665 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
Chris Wilson40a24482015-08-21 16:08:41 +01001666 flags |= PIPE_CONTROL_FLUSH_ENABLE;
Oscar Mateo47122742014-07-24 17:04:28 +01001667 }
1668
1669 if (invalidate_domains) {
1670 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1671 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1672 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1673 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1674 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1675 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1676 flags |= PIPE_CONTROL_QW_WRITE;
1677 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
Oscar Mateo47122742014-07-24 17:04:28 +01001678
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001679 /*
1680 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1681 * pipe control.
1682 */
Chris Wilsonc0336662016-05-06 15:40:21 +01001683 if (IS_GEN9(request->i915))
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001684 vf_flush_wa = true;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001685
1686 /* WaForGAMHang:kbl */
1687 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1688 dc_flush_wa = true;
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001689 }
Imre Deak9647ff32015-01-25 13:27:11 -08001690
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001691 len = 6;
1692
1693 if (vf_flush_wa)
1694 len += 6;
1695
1696 if (dc_flush_wa)
1697 len += 12;
1698
1699 ret = intel_ring_begin(request, len);
Oscar Mateo47122742014-07-24 17:04:28 +01001700 if (ret)
1701 return ret;
1702
Imre Deak9647ff32015-01-25 13:27:11 -08001703 if (vf_flush_wa) {
1704 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1705 intel_logical_ring_emit(ringbuf, 0);
1706 intel_logical_ring_emit(ringbuf, 0);
1707 intel_logical_ring_emit(ringbuf, 0);
1708 intel_logical_ring_emit(ringbuf, 0);
1709 intel_logical_ring_emit(ringbuf, 0);
1710 }
1711
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001712 if (dc_flush_wa) {
1713 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1714 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
1715 intel_logical_ring_emit(ringbuf, 0);
1716 intel_logical_ring_emit(ringbuf, 0);
1717 intel_logical_ring_emit(ringbuf, 0);
1718 intel_logical_ring_emit(ringbuf, 0);
1719 }
1720
Oscar Mateo47122742014-07-24 17:04:28 +01001721 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1722 intel_logical_ring_emit(ringbuf, flags);
1723 intel_logical_ring_emit(ringbuf, scratch_addr);
1724 intel_logical_ring_emit(ringbuf, 0);
1725 intel_logical_ring_emit(ringbuf, 0);
1726 intel_logical_ring_emit(ringbuf, 0);
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001727
1728 if (dc_flush_wa) {
1729 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1730 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
1731 intel_logical_ring_emit(ringbuf, 0);
1732 intel_logical_ring_emit(ringbuf, 0);
1733 intel_logical_ring_emit(ringbuf, 0);
1734 intel_logical_ring_emit(ringbuf, 0);
1735 }
1736
Oscar Mateo47122742014-07-24 17:04:28 +01001737 intel_logical_ring_advance(ringbuf);
1738
1739 return 0;
1740}
1741
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001742static u32 gen8_get_seqno(struct intel_engine_cs *engine)
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001743{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001744 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001745}
1746
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001747static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001748{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001749 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001750}
1751
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001752static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
Imre Deak319404d2015-08-14 18:35:27 +03001753{
Imre Deak319404d2015-08-14 18:35:27 +03001754 /*
1755 * On BXT A steppings there is a HW coherency issue whereby the
1756 * MI_STORE_DATA_IMM storing the completed request's seqno
1757 * occasionally doesn't invalidate the CPU cache. Work around this by
1758 * clflushing the corresponding cacheline whenever the caller wants
1759 * the coherency to be guaranteed. Note that this cacheline is known
1760 * to be clean at this point, since we only write it in
1761 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1762 * this clflush in practice becomes an invalidate operation.
1763 */
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001764 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
Imre Deak319404d2015-08-14 18:35:27 +03001765}
1766
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001767static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
Imre Deak319404d2015-08-14 18:35:27 +03001768{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001769 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
Imre Deak319404d2015-08-14 18:35:27 +03001770
 1771	/* See bxt_a_seqno_barrier() explaining the reason for the clflush. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001772 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
Imre Deak319404d2015-08-14 18:35:27 +03001773}
1774
Chris Wilson7c17d372016-01-20 15:43:35 +02001775/*
1776 * Reserve space for 2 NOOPs at the end of each request to be
1777 * used as a workaround for not being allowed to do lite
1778 * restore with HEAD==TAIL (WaIdleLiteRestore).
1779 */
1780#define WA_TAIL_DWORDS 2
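/*
 * Illustrative sketch only, assuming the tail handling used by
 * intel_logical_ring_advance_and_submit() elsewhere in this file: the
 * request tail is recorded first and the two spare NOOPs are emitted past
 * it, so a lite-restore of the same context never sees HEAD==TAIL:
 *
 *	intel_logical_ring_advance(ringbuf);
 *	request->tail = ringbuf->tail;
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_advance(ringbuf);
 */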
1781
John Harrisonc4e76632015-05-29 17:44:01 +01001782static int gen8_emit_request(struct drm_i915_gem_request *request)
Oscar Mateo4da46e12014-07-24 17:04:27 +01001783{
John Harrisonc4e76632015-05-29 17:44:01 +01001784 struct intel_ringbuffer *ringbuf = request->ringbuf;
Oscar Mateo4da46e12014-07-24 17:04:27 +01001785 int ret;
1786
Chris Wilson987046a2016-04-28 09:56:46 +01001787 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001788 if (ret)
1789 return ret;
1790
Chris Wilson7c17d372016-01-20 15:43:35 +02001791 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1792 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
Oscar Mateo4da46e12014-07-24 17:04:27 +01001793
Oscar Mateo4da46e12014-07-24 17:04:27 +01001794 intel_logical_ring_emit(ringbuf,
Chris Wilson7c17d372016-01-20 15:43:35 +02001795 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1796 intel_logical_ring_emit(ringbuf,
Chris Wilsona58c01a2016-04-29 13:18:21 +01001797 intel_hws_seqno_address(request->engine) |
Chris Wilson7c17d372016-01-20 15:43:35 +02001798 MI_FLUSH_DW_USE_GTT);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001799 intel_logical_ring_emit(ringbuf, 0);
John Harrisonc4e76632015-05-29 17:44:01 +01001800 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
Oscar Mateo4da46e12014-07-24 17:04:27 +01001801 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1802 intel_logical_ring_emit(ringbuf, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001803 return intel_logical_ring_advance_and_submit(request);
1804}
Oscar Mateo4da46e12014-07-24 17:04:27 +01001805
Chris Wilson7c17d372016-01-20 15:43:35 +02001806static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1807{
1808 struct intel_ringbuffer *ringbuf = request->ringbuf;
1809 int ret;
1810
Chris Wilson987046a2016-04-28 09:56:46 +01001811 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
Chris Wilson7c17d372016-01-20 15:43:35 +02001812 if (ret)
1813 return ret;
1814
Michał Winiarskice81a652016-04-12 15:51:55 +02001815 /* We're using qword write, seqno should be aligned to 8 bytes. */
1816 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1817
Chris Wilson7c17d372016-01-20 15:43:35 +02001818	/* w/a for post-sync ops: following a GPGPU operation we
1819 * need a prior CS_STALL, which is emitted by the flush
1820 * following the batch.
Michel Thierry53292cd2015-04-15 18:11:33 +01001821 */
Michał Winiarskice81a652016-04-12 15:51:55 +02001822 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
Chris Wilson7c17d372016-01-20 15:43:35 +02001823 intel_logical_ring_emit(ringbuf,
1824 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1825 PIPE_CONTROL_CS_STALL |
1826 PIPE_CONTROL_QW_WRITE));
Chris Wilsona58c01a2016-04-29 13:18:21 +01001827 intel_logical_ring_emit(ringbuf,
1828 intel_hws_seqno_address(request->engine));
Chris Wilson7c17d372016-01-20 15:43:35 +02001829 intel_logical_ring_emit(ringbuf, 0);
1830 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
Michał Winiarskice81a652016-04-12 15:51:55 +02001831 /* We're thrashing one dword of HWS. */
1832 intel_logical_ring_emit(ringbuf, 0);
Chris Wilson7c17d372016-01-20 15:43:35 +02001833 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
Michał Winiarskice81a652016-04-12 15:51:55 +02001834 intel_logical_ring_emit(ringbuf, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001835 return intel_logical_ring_advance_and_submit(request);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001836}
1837
John Harrisonbe013632015-05-29 17:43:45 +01001838static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
Damien Lespiaucef437a2015-02-10 19:32:19 +00001839{
Damien Lespiaucef437a2015-02-10 19:32:19 +00001840 struct render_state so;
Damien Lespiaucef437a2015-02-10 19:32:19 +00001841 int ret;
1842
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001843 ret = i915_gem_render_state_prepare(req->engine, &so);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001844 if (ret)
1845 return ret;
1846
1847 if (so.rodata == NULL)
1848 return 0;
1849
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001850 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
John Harrisonbe013632015-05-29 17:43:45 +01001851 I915_DISPATCH_SECURE);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001852 if (ret)
1853 goto out;
1854
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001855 ret = req->engine->emit_bb_start(req,
Arun Siluvery84e81022015-07-20 10:46:10 +01001856 (so.ggtt_offset + so.aux_batch_offset),
1857 I915_DISPATCH_SECURE);
1858 if (ret)
1859 goto out;
1860
John Harrisonb2af0372015-05-29 17:43:50 +01001861 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001862
Damien Lespiaucef437a2015-02-10 19:32:19 +00001863out:
1864 i915_gem_render_state_fini(&so);
1865 return ret;
1866}
1867
John Harrison87531812015-05-29 17:43:44 +01001868static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
Thomas Daniele7778be2014-12-02 12:50:48 +00001869{
1870 int ret;
1871
John Harrisone2be4fa2015-05-29 17:43:54 +01001872 ret = intel_logical_ring_workarounds_emit(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001873 if (ret)
1874 return ret;
1875
Peter Antoine3bbaba02015-07-10 20:13:11 +03001876 ret = intel_rcs_context_init_mocs(req);
1877 /*
 1878	 * Failing to program the MOCS is non-fatal. The system will not
 1879	 * run at peak performance, so generate an error and carry on.
1880 */
1881 if (ret)
1882 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1883
John Harrisonbe013632015-05-29 17:43:45 +01001884 return intel_lr_context_render_state_init(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001885}
1886
Oscar Mateo73e4d072014-07-24 17:04:48 +01001887/**
1888 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1889 *
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001890 * @engine: Engine Command Streamer.
Oscar Mateo73e4d072014-07-24 17:04:48 +01001891 *
1892 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001893void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
Oscar Mateo454afeb2014-07-24 17:04:22 +01001894{
John Harrison6402c332014-10-31 12:00:26 +00001895 struct drm_i915_private *dev_priv;
Oscar Mateo9832b9d2014-07-24 17:04:30 +01001896
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00001897 if (!intel_engine_initialized(engine))
Oscar Mateo48d82382014-07-24 17:04:23 +01001898 return;
1899
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +01001900 /*
 1901	 * Tasklet cannot be active at this point due to intel_mark_active/idle
1902 * so this is just for documentation.
1903 */
1904 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1905 tasklet_kill(&engine->irq_tasklet);
1906
Chris Wilsonc0336662016-05-06 15:40:21 +01001907 dev_priv = engine->i915;
John Harrison6402c332014-10-31 12:00:26 +00001908
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001909 if (engine->buffer) {
1910 intel_logical_ring_stop(engine);
1911 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
Dave Gordonb0366a52015-12-08 15:02:36 +00001912 }
Oscar Mateo48d82382014-07-24 17:04:23 +01001913
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001914 if (engine->cleanup)
1915 engine->cleanup(engine);
Oscar Mateo48d82382014-07-24 17:04:23 +01001916
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001917 i915_cmd_parser_fini_ring(engine);
1918 i915_gem_batch_pool_fini(&engine->batch_pool);
Oscar Mateo48d82382014-07-24 17:04:23 +01001919
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001920 if (engine->status_page.obj) {
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001921 i915_gem_object_unpin_map(engine->status_page.obj);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001922 engine->status_page.obj = NULL;
Oscar Mateo48d82382014-07-24 17:04:23 +01001923 }
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001924 intel_lr_context_unpin(dev_priv->kernel_context, engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001925
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001926 engine->idle_lite_restore_wa = 0;
1927 engine->disable_lite_restore_wa = false;
1928 engine->ctx_desc_template = 0;
Tvrtko Ursulinca825802016-01-15 15:10:27 +00001929
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001930 lrc_destroy_wa_ctx_obj(engine);
Chris Wilsonc0336662016-05-06 15:40:21 +01001931 engine->i915 = NULL;
Oscar Mateo454afeb2014-07-24 17:04:22 +01001932}
1933
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001934static void
Chris Wilsone1382ef2016-05-06 15:40:20 +01001935logical_ring_default_vfuncs(struct intel_engine_cs *engine)
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001936{
 1937	/* Default vfuncs which can be overridden by each engine. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001938 engine->init_hw = gen8_init_common_ring;
1939 engine->emit_request = gen8_emit_request;
1940 engine->emit_flush = gen8_emit_flush;
1941 engine->irq_get = gen8_logical_ring_get_irq;
1942 engine->irq_put = gen8_logical_ring_put_irq;
1943 engine->emit_bb_start = gen8_emit_bb_start;
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001944 engine->get_seqno = gen8_get_seqno;
1945 engine->set_seqno = gen8_set_seqno;
Chris Wilsonc0336662016-05-06 15:40:21 +01001946 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001947 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001948 engine->set_seqno = bxt_a_set_seqno;
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001949 }
1950}
1951
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001952static inline void
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001953logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001954{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001955 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1956 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
Chris Wilsone1382ef2016-05-06 15:40:20 +01001957 init_waitqueue_head(&engine->irq_queue);
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001958}
1959
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001960static int
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001961lrc_setup_hws(struct intel_engine_cs *engine,
1962 struct drm_i915_gem_object *dctx_obj)
1963{
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001964 void *hws;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001965
1966 /* The HWSP is part of the default context object in LRC mode. */
1967 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
1968 LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001969 hws = i915_gem_object_pin_map(dctx_obj);
1970 if (IS_ERR(hws))
1971 return PTR_ERR(hws);
1972 engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001973 engine->status_page.obj = dctx_obj;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001974
1975 return 0;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001976}
1977
Chris Wilsone1382ef2016-05-06 15:40:20 +01001978static const struct logical_ring_info {
1979 const char *name;
1980 unsigned exec_id;
1981 unsigned guc_id;
1982 u32 mmio_base;
1983 unsigned irq_shift;
1984} logical_rings[] = {
1985 [RCS] = {
1986 .name = "render ring",
1987 .exec_id = I915_EXEC_RENDER,
1988 .guc_id = GUC_RENDER_ENGINE,
1989 .mmio_base = RENDER_RING_BASE,
1990 .irq_shift = GEN8_RCS_IRQ_SHIFT,
1991 },
1992 [BCS] = {
1993 .name = "blitter ring",
1994 .exec_id = I915_EXEC_BLT,
1995 .guc_id = GUC_BLITTER_ENGINE,
1996 .mmio_base = BLT_RING_BASE,
1997 .irq_shift = GEN8_BCS_IRQ_SHIFT,
1998 },
1999 [VCS] = {
2000 .name = "bsd ring",
2001 .exec_id = I915_EXEC_BSD,
2002 .guc_id = GUC_VIDEO_ENGINE,
2003 .mmio_base = GEN6_BSD_RING_BASE,
2004 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
2005 },
2006 [VCS2] = {
2007 .name = "bsd2 ring",
2008 .exec_id = I915_EXEC_BSD,
2009 .guc_id = GUC_VIDEO_ENGINE2,
2010 .mmio_base = GEN8_BSD2_RING_BASE,
2011 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
2012 },
2013 [VECS] = {
2014 .name = "video enhancement ring",
2015 .exec_id = I915_EXEC_VEBOX,
2016 .guc_id = GUC_VIDEOENHANCE_ENGINE,
2017 .mmio_base = VEBOX_RING_BASE,
2018 .irq_shift = GEN8_VECS_IRQ_SHIFT,
2019 },
2020};
2021
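/*
 * Fill in the software state for one engine from the logical_rings[] table
 * above: name, ids, mmio base, forcewake domains, the various lists, the
 * execlist tasklet and the default vfuncs/irqs. The hardware status page
 * and default context are set up later, in logical_ring_init().
 */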
2022static struct intel_engine_cs *
2023logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
Oscar Mateo454afeb2014-07-24 17:04:22 +01002024{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002025 const struct logical_ring_info *info = &logical_rings[id];
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002026 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsone1382ef2016-05-06 15:40:20 +01002027 struct intel_engine_cs *engine = &dev_priv->engine[id];
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002028 enum forcewake_domains fw_domains;
Chris Wilsone1382ef2016-05-06 15:40:20 +01002029
2030 engine->id = id;
2031 engine->name = info->name;
2032 engine->exec_id = info->exec_id;
2033 engine->guc_id = info->guc_id;
2034 engine->mmio_base = info->mmio_base;
2035
Chris Wilsonc0336662016-05-06 15:40:21 +01002036 engine->i915 = dev_priv;
Oscar Mateo48d82382014-07-24 17:04:23 +01002037
2038 /* Intentionally left blank. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002039 engine->buffer = NULL;
Oscar Mateo48d82382014-07-24 17:04:23 +01002040
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002041 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2042 RING_ELSP(engine),
2043 FW_REG_WRITE);
2044
2045 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2046 RING_CONTEXT_STATUS_PTR(engine),
2047 FW_REG_READ | FW_REG_WRITE);
2048
2049 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2050 RING_CONTEXT_STATUS_BUF_BASE(engine),
2051 FW_REG_READ);
2052
2053 engine->fw_domains = fw_domains;
2054
Chris Wilsone1382ef2016-05-06 15:40:20 +01002055 INIT_LIST_HEAD(&engine->active_list);
2056 INIT_LIST_HEAD(&engine->request_list);
2057 INIT_LIST_HEAD(&engine->buffers);
2058 INIT_LIST_HEAD(&engine->execlist_queue);
2059 spin_lock_init(&engine->execlist_lock);
2060
2061 tasklet_init(&engine->irq_tasklet,
2062 intel_lrc_irq_handler, (unsigned long)engine);
2063
2064 logical_ring_init_platform_invariants(engine);
2065 logical_ring_default_vfuncs(engine);
2066 logical_ring_default_irqs(engine, info->irq_shift);
2067
2068 intel_engine_init_hangcheck(engine);
Chris Wilsonc0336662016-05-06 15:40:21 +01002069 i915_gem_batch_pool_init(dev, &engine->batch_pool);
Chris Wilsone1382ef2016-05-06 15:40:20 +01002070
2071 return engine;
2072}
2073
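/*
 * Complete the engine initialization started in logical_ring_setup(): set up
 * the command parser, allocate and pin the kernel default context for this
 * engine, and point the hardware status page at it.
 */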
2074static int
2075logical_ring_init(struct intel_engine_cs *engine)
2076{
Chris Wilsone2efd132016-05-24 14:53:34 +01002077 struct i915_gem_context *dctx = engine->i915->kernel_context;
Chris Wilsone1382ef2016-05-06 15:40:20 +01002078 int ret;
2079
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002080 ret = i915_cmd_parser_init_ring(engine);
Oscar Mateo48d82382014-07-24 17:04:23 +01002081 if (ret)
Dave Gordonb0366a52015-12-08 15:02:36 +00002082 goto error;
Oscar Mateo48d82382014-07-24 17:04:23 +01002083
Chris Wilson978f1e02016-04-28 09:56:54 +01002084 ret = execlists_context_deferred_alloc(dctx, engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01002085 if (ret)
Dave Gordonb0366a52015-12-08 15:02:36 +00002086 goto error;
Nick Hoathe84fe802015-09-11 12:53:46 +01002087
2088 /* As this is the default context, always pin it */
Chris Wilson24f1d3c2016-04-28 09:56:53 +01002089 ret = intel_lr_context_pin(dctx, engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01002090 if (ret) {
Chris Wilson24f1d3c2016-04-28 09:56:53 +01002091 DRM_ERROR("Failed to pin context for %s: %d\n",
2092 engine->name, ret);
Dave Gordonb0366a52015-12-08 15:02:36 +00002093 goto error;
Nick Hoathe84fe802015-09-11 12:53:46 +01002094 }
Oscar Mateo564ddb22014-08-21 11:40:54 +01002095
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01002096 /* And setup the hardware status page. */
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002097 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2098 if (ret) {
2099 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2100 goto error;
2101 }
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01002102
Dave Gordonb0366a52015-12-08 15:02:36 +00002103 return 0;
2104
2105error:
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002106 intel_logical_ring_cleanup(engine);
Oscar Mateo564ddb22014-08-21 11:40:54 +01002107 return ret;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002108}
2109
2110static int logical_render_ring_init(struct drm_device *dev)
2111{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002112 struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
Daniel Vetter99be1df2014-11-20 00:33:06 +01002113 int ret;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002114
Oscar Mateo73d477f2014-07-24 17:04:31 +01002115 if (HAS_L3_DPF(dev))
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002116 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002117
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00002118 /* Override some for render ring. */
Damien Lespiau82ef8222015-02-09 19:33:08 +00002119 if (INTEL_INFO(dev)->gen >= 9)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002120 engine->init_hw = gen9_init_render_ring;
Damien Lespiau82ef8222015-02-09 19:33:08 +00002121 else
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002122 engine->init_hw = gen8_init_render_ring;
2123 engine->init_context = gen8_init_rcs_context;
2124 engine->cleanup = intel_fini_pipe_control;
2125 engine->emit_flush = gen8_emit_flush_render;
2126 engine->emit_request = gen8_emit_request_render;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01002127
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002128 ret = intel_init_pipe_control(engine);
Daniel Vetter99be1df2014-11-20 00:33:06 +01002129 if (ret)
2130 return ret;
2131
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002132 ret = intel_init_workaround_bb(engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01002133 if (ret) {
2134 /*
2136		 * We continue even if we fail to initialize the WA batch
2137		 * buffer: we only expect rare glitches from it, nothing
2138		 * critical enough to prevent us from using the GPU
2138 */
2139 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2140 ret);
2141 }
2142
Chris Wilsone1382ef2016-05-06 15:40:20 +01002143 ret = logical_ring_init(engine);
Arun Siluveryc4db7592015-06-19 18:37:11 +01002144 if (ret) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002145 lrc_destroy_wa_ctx_obj(engine);
Arun Siluveryc4db7592015-06-19 18:37:11 +01002146 }
Arun Siluvery17ee9502015-06-19 19:07:01 +01002147
2148 return ret;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002149}
2150
2151static int logical_bsd_ring_init(struct drm_device *dev)
2152{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002153 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002154
Chris Wilsone1382ef2016-05-06 15:40:20 +01002155 return logical_ring_init(engine);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002156}
2157
2158static int logical_bsd2_ring_init(struct drm_device *dev)
2159{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002160 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002161
Chris Wilsone1382ef2016-05-06 15:40:20 +01002162 return logical_ring_init(engine);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002163}
2164
2165static int logical_blt_ring_init(struct drm_device *dev)
2166{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002167 struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002168
Chris Wilsone1382ef2016-05-06 15:40:20 +01002169 return logical_ring_init(engine);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002170}
2171
2172static int logical_vebox_ring_init(struct drm_device *dev)
2173{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002174 struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002175
Chris Wilsone1382ef2016-05-06 15:40:20 +01002176 return logical_ring_init(engine);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002177}
2178
Oscar Mateo73e4d072014-07-24 17:04:48 +01002179/**
2180 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2181 * @dev: DRM device.
2182 *
2183 * This function initializes the engines for Execlists submission (the equivalent in the
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00002184 * legacy ringbuffer submission world would be i915_gem_init_engines). It does so only for
Oscar Mateo73e4d072014-07-24 17:04:48 +01002185 * those engines that are present in the hardware.
2186 *
2187 * Return: non-zero if the initialization failed.
2188 */
Oscar Mateo454afeb2014-07-24 17:04:22 +01002189int intel_logical_rings_init(struct drm_device *dev)
2190{
2191 struct drm_i915_private *dev_priv = dev->dev_private;
2192 int ret;
2193
2194 ret = logical_render_ring_init(dev);
2195 if (ret)
2196 return ret;
2197
2198 if (HAS_BSD(dev)) {
2199 ret = logical_bsd_ring_init(dev);
2200 if (ret)
2201 goto cleanup_render_ring;
2202 }
2203
2204 if (HAS_BLT(dev)) {
2205 ret = logical_blt_ring_init(dev);
2206 if (ret)
2207 goto cleanup_bsd_ring;
2208 }
2209
2210 if (HAS_VEBOX(dev)) {
2211 ret = logical_vebox_ring_init(dev);
2212 if (ret)
2213 goto cleanup_blt_ring;
2214 }
2215
2216 if (HAS_BSD2(dev)) {
2217 ret = logical_bsd2_ring_init(dev);
2218 if (ret)
2219 goto cleanup_vebox_ring;
2220 }
2221
Oscar Mateo454afeb2014-07-24 17:04:22 +01002222 return 0;
2223
Oscar Mateo454afeb2014-07-24 17:04:22 +01002224cleanup_vebox_ring:
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002225 intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002226cleanup_blt_ring:
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002227 intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002228cleanup_bsd_ring:
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002229 intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002230cleanup_render_ring:
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002231 intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
Oscar Mateo454afeb2014-07-24 17:04:22 +01002232
2233 return ret;
2234}
2235
Jeff McGee0cea6502015-02-13 10:27:56 -06002236static u32
Chris Wilsonc0336662016-05-06 15:40:21 +01002237make_rpcs(struct drm_i915_private *dev_priv)
Jeff McGee0cea6502015-02-13 10:27:56 -06002238{
2239 u32 rpcs = 0;
2240
2241 /*
2242 * No explicit RPCS request is needed to ensure full
2243 * slice/subslice/EU enablement prior to Gen9.
2244 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002245 if (INTEL_GEN(dev_priv) < 9)
Jeff McGee0cea6502015-02-13 10:27:56 -06002246 return 0;
2247
2248 /*
2249 * Starting in Gen9, render power gating can leave
2250 * slice/subslice/EU in a partially enabled state. We
2251 * must make an explicit request through RPCS for full
2252 * enablement.
2253 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002254 if (INTEL_INFO(dev_priv)->has_slice_pg) {
Jeff McGee0cea6502015-02-13 10:27:56 -06002255 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
Chris Wilsonc0336662016-05-06 15:40:21 +01002256 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002257 GEN8_RPCS_S_CNT_SHIFT;
2258 rpcs |= GEN8_RPCS_ENABLE;
2259 }
2260
Chris Wilsonc0336662016-05-06 15:40:21 +01002261 if (INTEL_INFO(dev_priv)->has_subslice_pg) {
Jeff McGee0cea6502015-02-13 10:27:56 -06002262 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
Chris Wilsonc0336662016-05-06 15:40:21 +01002263 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002264 GEN8_RPCS_SS_CNT_SHIFT;
2265 rpcs |= GEN8_RPCS_ENABLE;
2266 }
2267
Chris Wilsonc0336662016-05-06 15:40:21 +01002268 if (INTEL_INFO(dev_priv)->has_eu_pg) {
2269 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002270 GEN8_RPCS_EU_MIN_SHIFT;
Chris Wilsonc0336662016-05-06 15:40:21 +01002271 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
Jeff McGee0cea6502015-02-13 10:27:56 -06002272 GEN8_RPCS_EU_MAX_SHIFT;
2273 rpcs |= GEN8_RPCS_ENABLE;
2274 }
2275
2276 return rpcs;
2277}
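/*
 * Purely illustrative composition of the value built above (the counts are
 * made up and do not describe any real SKU): a Gen9 part with slice,
 * subslice and EU power gating and slice_total = 3, subslice_per_slice = 4,
 * eu_per_subslice = 8 would end up with
 *
 *	rpcs = GEN8_RPCS_ENABLE |
 *	       GEN8_RPCS_S_CNT_ENABLE  | (3 << GEN8_RPCS_S_CNT_SHIFT) |
 *	       GEN8_RPCS_SS_CNT_ENABLE | (4 << GEN8_RPCS_SS_CNT_SHIFT) |
 *	       (8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT);
 */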
2278
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002279static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
Michel Thierry71562912016-02-23 10:31:49 +00002280{
2281 u32 indirect_ctx_offset;
2282
Chris Wilsonc0336662016-05-06 15:40:21 +01002283 switch (INTEL_GEN(engine->i915)) {
Michel Thierry71562912016-02-23 10:31:49 +00002284 default:
Chris Wilsonc0336662016-05-06 15:40:21 +01002285 MISSING_CASE(INTEL_GEN(engine->i915));
Michel Thierry71562912016-02-23 10:31:49 +00002286 /* fall through */
2287 case 9:
2288 indirect_ctx_offset =
2289 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2290 break;
2291 case 8:
2292 indirect_ctx_offset =
2293 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2294 break;
2295 }
2296
2297 return indirect_ctx_offset;
2298}
2299
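/*
 * Fill in the default register state of a freshly allocated logical ring
 * context image: the MI_LOAD_REGISTER_IMM headers and the (reg, value)
 * pairs the GPU will load on the first context restore.
 */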
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002300static int
Chris Wilsone2efd132016-05-24 14:53:34 +01002301populate_lr_context(struct i915_gem_context *ctx,
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002302 struct drm_i915_gem_object *ctx_obj,
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002303 struct intel_engine_cs *engine,
2304 struct intel_ringbuffer *ringbuf)
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002305{
Chris Wilsonc0336662016-05-06 15:40:21 +01002306 struct drm_i915_private *dev_priv = ctx->i915;
Daniel Vetterae6c4802014-08-06 15:04:53 +02002307 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002308 void *vaddr;
2309 u32 *reg_state;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002310 int ret;
2311
Thomas Daniel2d965532014-08-19 10:13:36 +01002312 if (!ppgtt)
2313 ppgtt = dev_priv->mm.aliasing_ppgtt;
2314
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002315 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2316 if (ret) {
2317 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2318 return ret;
2319 }
2320
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002321 vaddr = i915_gem_object_pin_map(ctx_obj);
2322 if (IS_ERR(vaddr)) {
2323 ret = PTR_ERR(vaddr);
2324 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002325 return ret;
2326 }
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002327 ctx_obj->dirty = true;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002328
2329 /* The second page of the context object contains some fields which must
2330 * be set up prior to the first execution. */
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002331 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002332
2333 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2334 * commands followed by (reg, value) pairs. The values we are setting here are
2335 * only for the first context restore: on a subsequent save, the GPU will
2336 * recreate this batchbuffer with new values (including all the missing
2337 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
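	/* As a rough sketch (offsets per the CTX_* defines, values as programmed
	 * below), the first LRI block of the register state therefore looks like:
	 *
	 *	reg_state[CTX_LRI_HEADER_0]      MI_LOAD_REGISTER_IMM(14 or 11)
	 *	reg_state[CTX_CONTEXT_CONTROL]   RING_CONTEXT_CONTROL(engine), flags
	 *	reg_state[CTX_RING_HEAD]         RING_HEAD(mmio_base), 0
	 *	reg_state[CTX_RING_TAIL]         RING_TAIL(mmio_base), 0
	 *	...
	 */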
Ville Syrjälä0d925ea2015-11-04 23:20:11 +02002338 reg_state[CTX_LRI_HEADER_0] =
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002339 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2340 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2341 RING_CONTEXT_CONTROL(engine),
Ville Syrjälä0d925ea2015-11-04 23:20:11 +02002342 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2343 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
Chris Wilsonc0336662016-05-06 15:40:21 +01002344 (HAS_RESOURCE_STREAMER(dev_priv) ?
Michel Thierry99cf8ea2016-02-25 09:48:58 +00002345 CTX_CTRL_RS_CTX_ENABLE : 0)));
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002346 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2347 0);
2348 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2349 0);
Thomas Daniel7ba717c2014-11-13 10:28:56 +00002350 /* Ring buffer start address is not known until the buffer is pinned.
2351 * It is written to the context image in execlists_update_context()
2352 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002353 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2354 RING_START(engine->mmio_base), 0);
2355 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2356 RING_CTL(engine->mmio_base),
Ville Syrjälä0d925ea2015-11-04 23:20:11 +02002357 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002358 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2359 RING_BBADDR_UDW(engine->mmio_base), 0);
2360 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2361 RING_BBADDR(engine->mmio_base), 0);
2362 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2363 RING_BBSTATE(engine->mmio_base),
Ville Syrjälä0d925ea2015-11-04 23:20:11 +02002364 RING_BB_PPGTT);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002365 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2366 RING_SBBADDR_UDW(engine->mmio_base), 0);
2367 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2368 RING_SBBADDR(engine->mmio_base), 0);
2369 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2370 RING_SBBSTATE(engine->mmio_base), 0);
2371 if (engine->id == RCS) {
2372 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2373 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2374 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2375 RING_INDIRECT_CTX(engine->mmio_base), 0);
2376 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2377 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2378 if (engine->wa_ctx.obj) {
2379 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
Arun Siluvery17ee9502015-06-19 19:07:01 +01002380 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2381
2382 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2383 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2384 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2385
2386 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002387 intel_lr_indirect_ctx_offset(engine) << 6;
Arun Siluvery17ee9502015-06-19 19:07:01 +01002388
2389 reg_state[CTX_BB_PER_CTX_PTR+1] =
2390 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2391 0x01;
2392 }
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002393 }
Ville Syrjälä0d925ea2015-11-04 23:20:11 +02002394 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002395 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2396 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
Ville Syrjälä0d925ea2015-11-04 23:20:11 +02002397	/* PDP values will be assigned later if needed */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002398 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2399 0);
2400 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2401 0);
2402 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2403 0);
2404 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2405 0);
2406 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2407 0);
2408 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2409 0);
2410 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2411 0);
2412 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2413 0);
Michel Thierryd7b26332015-04-08 12:13:34 +01002414
Michel Thierry2dba3232015-07-30 11:06:23 +01002415 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2417		/* 64b PPGTT (48-bit canonical)
2418		 * PDP0_DESCRIPTOR contains the base address of the PML4; the
2419		 * other PDP descriptors are ignored.
2419 */
2420 ASSIGN_CTX_PML4(ppgtt, reg_state);
2421 } else {
2422 /* 32b PPGTT
2423		 * PDP*_DESCRIPTOR contains the base address of the space it supports.
2424		 * With dynamic page allocation, PDPs may not be allocated at
2425		 * this point. Point the unallocated PDPs to the scratch page.
2426 */
Tvrtko Ursulinc6a2ac72016-02-26 16:58:32 +00002427 execlists_update_context_pdps(ppgtt, reg_state);
Michel Thierry2dba3232015-07-30 11:06:23 +01002428 }
2429
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002430 if (engine->id == RCS) {
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002431 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
Ville Syrjälä0d925ea2015-11-04 23:20:11 +02002432 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
Chris Wilsonc0336662016-05-06 15:40:21 +01002433 make_rpcs(dev_priv));
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002434 }
2435
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002436 i915_gem_object_unpin_map(ctx_obj);
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002437
2438 return 0;
2439}
2440
Oscar Mateo73e4d072014-07-24 17:04:48 +01002441/**
Dave Gordonc5d46ee2016-01-05 12:21:33 +00002442 * intel_lr_context_size() - return the size of the context for an engine
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002443 * @engine: which engine to find the context size for
Dave Gordonc5d46ee2016-01-05 12:21:33 +00002444 *
2445 * Each engine may require a different amount of space for a context image,
2446 * so when allocating (or copying) an image, this function can be used to
2447 * find the right size for the specific engine.
2448 *
2449 * Return: size (in bytes) of an engine-specific context image
2450 *
2451 * Note: this size includes the HWSP, which is part of the context image
2452 * in LRC mode, but does not include the "shared data page" used with
2453 * GuC submission. The caller should account for this if using the GuC.
2454 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002455uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
Oscar Mateo8c8579172014-07-24 17:04:14 +01002456{
2457 int ret = 0;
2458
Chris Wilsonc0336662016-05-06 15:40:21 +01002459 WARN_ON(INTEL_GEN(engine->i915) < 8);
Oscar Mateo8c8579172014-07-24 17:04:14 +01002460
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002461 switch (engine->id) {
Oscar Mateo8c8579172014-07-24 17:04:14 +01002462 case RCS:
Chris Wilsonc0336662016-05-06 15:40:21 +01002463 if (INTEL_GEN(engine->i915) >= 9)
Michael H. Nguyen468c6812014-11-13 17:51:49 +00002464 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2465 else
2466 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
Oscar Mateo8c8579172014-07-24 17:04:14 +01002467 break;
2468 case VCS:
2469 case BCS:
2470 case VECS:
2471 case VCS2:
2472 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2473 break;
2474 }
2475
2476 return ret;
Oscar Mateoede7d422014-07-24 17:04:12 +01002477}
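/*
 * A minimal sketch of the expected usage, mirroring what
 * execlists_context_deferred_alloc() does below (LRC_PPHWSP_PN accounts for
 * the extra page shared with the GuC):
 *
 *	u32 size = round_up(intel_lr_context_size(engine), 4096);
 *	size += PAGE_SIZE * LRC_PPHWSP_PN;
 *	ctx_obj = i915_gem_object_create(ctx->i915->dev, size);
 */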
2478
Oscar Mateo73e4d072014-07-24 17:04:48 +01002479/**
Chris Wilson978f1e02016-04-28 09:56:54 +01002480 * execlists_context_deferred_alloc() - create the LRC specific bits of a context
Oscar Mateo73e4d072014-07-24 17:04:48 +01002481 * @ctx: LR context to create.
Chris Wilson978f1e02016-04-28 09:56:54 +01002482 * @engine: engine to be used with the context.
Oscar Mateo73e4d072014-07-24 17:04:48 +01002483 *
2484 * This function can be called more than once, with different engines, if we plan
2485 * to use the context with them. The context backing objects and the ringbuffers
2486 * (especially the ringbuffer backing objects) take up a lot of memory, which is why
2487 * the creation is deferred: it's better to make sure first that we need to use
2488 * a given ring with the context.
2489 *
Masanari Iida32197aa2014-10-20 23:53:13 +09002490 * Return: non-zero on error.
Oscar Mateo73e4d072014-07-24 17:04:48 +01002491 */
Chris Wilsone2efd132016-05-24 14:53:34 +01002492static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
Chris Wilson978f1e02016-04-28 09:56:54 +01002493 struct intel_engine_cs *engine)
Oscar Mateoede7d422014-07-24 17:04:12 +01002494{
Oscar Mateo8c8579172014-07-24 17:04:14 +01002495 struct drm_i915_gem_object *ctx_obj;
Chris Wilson9021ad02016-05-24 14:53:37 +01002496 struct intel_context *ce = &ctx->engine[engine->id];
Oscar Mateo8c8579172014-07-24 17:04:14 +01002497 uint32_t context_size;
Oscar Mateo84c23772014-07-24 17:04:15 +01002498 struct intel_ringbuffer *ringbuf;
Oscar Mateo8c8579172014-07-24 17:04:14 +01002499 int ret;
2500
Chris Wilson9021ad02016-05-24 14:53:37 +01002501 WARN_ON(ce->state);
Oscar Mateoede7d422014-07-24 17:04:12 +01002502
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002503 context_size = round_up(intel_lr_context_size(engine), 4096);
Oscar Mateo8c8579172014-07-24 17:04:14 +01002504
Alex Daid1675192015-08-12 15:43:43 +01002505	/* One extra page for the data shared between the driver and the GuC */
2506 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2507
Chris Wilsonc0336662016-05-06 15:40:21 +01002508 ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
Chris Wilsonfe3db792016-04-25 13:32:13 +01002509 if (IS_ERR(ctx_obj)) {
Dan Carpenter3126a662015-04-30 17:30:50 +03002510 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
Chris Wilsonfe3db792016-04-25 13:32:13 +01002511 return PTR_ERR(ctx_obj);
Oscar Mateo8c8579172014-07-24 17:04:14 +01002512 }
2513
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002514 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
Chris Wilson01101fa2015-09-03 13:01:39 +01002515 if (IS_ERR(ringbuf)) {
2516 ret = PTR_ERR(ringbuf);
Nick Hoathe84fe802015-09-11 12:53:46 +01002517 goto error_deref_obj;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002518 }
2519
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002520 ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002521 if (ret) {
2522 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
Nick Hoathe84fe802015-09-11 12:53:46 +01002523 goto error_ringbuf;
Oscar Mateo84c23772014-07-24 17:04:15 +01002524 }
2525
Chris Wilson9021ad02016-05-24 14:53:37 +01002526 ce->ringbuf = ringbuf;
2527 ce->state = ctx_obj;
2528 ce->initialised = engine->init_context == NULL;
Oscar Mateoede7d422014-07-24 17:04:12 +01002529
2530 return 0;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002531
Chris Wilson01101fa2015-09-03 13:01:39 +01002532error_ringbuf:
2533 intel_ringbuffer_free(ringbuf);
Nick Hoathe84fe802015-09-11 12:53:46 +01002534error_deref_obj:
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002535 drm_gem_object_unreference(&ctx_obj->base);
Chris Wilson9021ad02016-05-24 14:53:37 +01002536 ce->ringbuf = NULL;
2537 ce->state = NULL;
Oscar Mateo8670d6f2014-07-24 17:04:17 +01002538 return ret;
Oscar Mateoede7d422014-07-24 17:04:12 +01002539}
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002540
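/*
 * Zero the ring HEAD/TAIL stored in every engine's context image of @ctx and
 * reset the matching ringbuffer software state, so that nothing stale is
 * replayed when the contexts are next restored (typically used when the GPU
 * has been reset).
 */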
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002541void intel_lr_context_reset(struct drm_i915_private *dev_priv,
Chris Wilsone2efd132016-05-24 14:53:34 +01002542 struct i915_gem_context *ctx)
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002543{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002544 struct intel_engine_cs *engine;
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002545
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002546 for_each_engine(engine, dev_priv) {
Chris Wilson9021ad02016-05-24 14:53:37 +01002547 struct intel_context *ce = &ctx->engine[engine->id];
2548 struct drm_i915_gem_object *ctx_obj = ce->state;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002549 void *vaddr;
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002550 uint32_t *reg_state;
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002551
2552 if (!ctx_obj)
2553 continue;
2554
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002555 vaddr = i915_gem_object_pin_map(ctx_obj);
2556 if (WARN_ON(IS_ERR(vaddr)))
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002557 continue;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002558
2559 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
2560 ctx_obj->dirty = true;
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002561
2562 reg_state[CTX_RING_HEAD+1] = 0;
2563 reg_state[CTX_RING_TAIL+1] = 0;
2564
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002565 i915_gem_object_unpin_map(ctx_obj);
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002566
Chris Wilson9021ad02016-05-24 14:53:37 +01002567 ce->ringbuf->head = 0;
2568 ce->ringbuf->tail = 0;
Thomas Daniel3e5b6f02015-02-16 16:12:53 +00002569 }
2570}