/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences from the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before), we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
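 * As a worked example (with made-up labels): if the queue holds requests
 * A1, A2 and B1, where A1 and A2 belong to the same context A, then A1
 * and A2 are coalesced behind a single ELSP entry whose tail covers both,
 * and B1 fills the second port; submitting the pair {A2, B1} therefore
 * executes all three requests.
 *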
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define GEN8_CTX_STATUS_COMPLETED_MASK \
	 (GEN8_CTX_STATUS_ACTIVE_IDLE | \
	  GEN8_CTX_STATUS_PREEMPTED | \
	  GEN8_CTX_STATUS_ELEMENT_SWITCH)

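/*
 * Dword offsets of the register/value pairs inside the per-context
 * register state image: slot CTX_FOO holds the register's MMIO offset
 * and slot CTX_FOO+1 its value (see ASSIGN_CTX_REG() below).
 */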
#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)
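/*
 * Example usage, a sketch of how the default register state is seeded
 * elsewhere in this file (see execlists_init_reg_state(), forward-declared
 * below):
 *   ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base), 0);
 */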

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

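/*
 * NOOP padding prepared after every request so that, on a lite restore
 * (WaIdleLiteRestore), the resubmitted tail never equals the ring head;
 * see the comment in execlists_dequeue().
 */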
#define WA_TAIL_DWORDS 2

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
				     struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine,
				     struct intel_ring *ring);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms with execlists available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *					  for a pinned context
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *	bits  0-11:	flags, GEN8_CTX_* (cached in ctx->desc_template)
 *	bits 12-31:	LRCA, GTT address of (the HWSP of) this context
 *	bits 32-52:	ctx ID, a globally unique tag
 *	bits 53-54:	mbz, reserved for use by hardware
 *	bits 55-63:	group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = ctx->desc_template;			/* bits  0-11 */
	desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
							/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */

	ce->lrc_desc = desc;
}

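/*
 * For illustration only (hypothetical values): a context whose state is
 * pinned at GGTT offset 0x10000, with hw_id 5 and no template flags set,
 * would yield:
 *   desc = (5ull << GEN8_CTX_ID_SHIFT) |
 *	    (0x10000 + LRC_PPHWSP_PN * PAGE_SIZE);
 */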
uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}

static inline void
execlists_context_status_change(struct drm_i915_gem_request *rq,
				unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead-code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static u64 execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
	struct i915_hw_ppgtt *ppgtt =
		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
	u32 *reg_state = ce->lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
		execlists_update_context_pdps(ppgtt, reg_state);

	return ce->lrc_desc;
}

static void execlists_submit_ports(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct execlist_port *port = engine->execlist_port;
	u32 __iomem *elsp =
		dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
	u64 desc[2];

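	/*
	 * port[n].count tracks how many times the request occupying the
	 * port has been written to the ELSP; a non-zero count on port[0]
	 * means this submission is a lite restore of the same request,
	 * so the SCHEDULE_IN notification is only sent the first time.
	 */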
	GEM_BUG_ON(port[0].count > 1);
	if (!port[0].count)
		execlists_context_status_change(port[0].request,
						INTEL_CONTEXT_SCHEDULE_IN);
	desc[0] = execlists_update_context(port[0].request);
	GEM_DEBUG_EXEC(port[0].context_id = upper_32_bits(desc[0]));
	port[0].count++;

	if (port[1].request) {
		GEM_BUG_ON(port[1].count);
		execlists_context_status_change(port[1].request,
						INTEL_CONTEXT_SCHEDULE_IN);
		desc[1] = execlists_update_context(port[1].request);
		GEM_DEBUG_EXEC(port[1].context_id = upper_32_bits(desc[1]));
		port[1].count = 1;
	} else {
		desc[1] = 0;
	}
	GEM_BUG_ON(desc[0] == desc[1]);

	/* You must always write both descriptors in the order below. */
	writel(upper_32_bits(desc[1]), elsp);
	writel(lower_32_bits(desc[1]), elsp);

	writel(upper_32_bits(desc[0]), elsp);
	/* The context is automatically loaded after the following */
	writel(lower_32_bits(desc[0]), elsp);
}

static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
{
	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
		i915_gem_context_force_single_submission(ctx));
}

static bool can_merge_ctx(const struct i915_gem_context *prev,
			  const struct i915_gem_context *next)
{
	if (prev != next)
		return false;

	if (ctx_single_port_submission(prev))
		return false;

	return true;
}

static void execlists_dequeue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *last;
	struct execlist_port *port = engine->execlist_port;
	unsigned long flags;
	struct rb_node *rb;
	bool submit = false;

	last = port->request;
	if (last)
		/* WaIdleLiteRestore:bdw,skl
		 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
		 * as we resubmit the request. See gen8_emit_breadcrumb()
		 * for where we prepare the padding after the end of the
		 * request.
		 */
		last->tail = last->wa_tail;

	GEM_BUG_ON(port[1].request);

	/* Hardware submission is through 2 ports. Conceptually each port
	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
	 * static for a context, and unique to each, so we only execute
	 * requests belonging to a single context from each ring. RING_HEAD
	 * is maintained by the CS in the context image, it marks the place
	 * where it got up to last time, and through RING_TAIL we tell the CS
	 * where we want to execute up to this time.
	 *
	 * In this list the requests are in order of execution. Consecutive
	 * requests from the same context are adjacent in the ringbuffer. We
	 * can combine these requests into a single RING_TAIL update:
	 *
	 *		RING_HEAD...req1...req2
	 *				      ^- RING_TAIL
	 * since to execute req2 the CS must first execute req1.
	 *
	 * Our goal then is to point each port to the end of a consecutive
	 * sequence of requests as being the most optimal (fewest wake ups
	 * and context switches) submission.
	 */

	spin_lock_irqsave(&engine->timeline->lock, flags);
	rb = engine->execlist_first;
	while (rb) {
		struct drm_i915_gem_request *cursor =
			rb_entry(rb, typeof(*cursor), priotree.node);

		/* Can we combine this request with the current port? It has to
		 * be the same context/ringbuffer and not have any exceptions
		 * (e.g. GVT saying never to combine contexts).
		 *
		 * If we can combine the requests, we can execute both by
		 * updating the RING_TAIL to point to the end of the second
		 * request, and so we never need to tell the hardware about
		 * the first.
		 */
		if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
			/* If we are on the second port and cannot combine
			 * this request with the last, then we are done.
			 */
			if (port != engine->execlist_port)
				break;

			/* If GVT overrides us we only ever submit port[0],
			 * leaving port[1] empty. Note that we also have
			 * to be careful that we don't queue the same
			 * context (even though a different request) to
			 * the second port.
			 */
			if (ctx_single_port_submission(last->ctx) ||
			    ctx_single_port_submission(cursor->ctx))
				break;

			GEM_BUG_ON(last->ctx == cursor->ctx);

			i915_gem_request_assign(&port->request, last);
			port++;
		}

		rb = rb_next(rb);
		rb_erase(&cursor->priotree.node, &engine->execlist_queue);
		RB_CLEAR_NODE(&cursor->priotree.node);
		cursor->priotree.priority = INT_MAX;

		__i915_gem_request_submit(cursor);
		last = cursor;
		submit = true;
	}
	if (submit) {
		i915_gem_request_assign(&port->request, last);
		engine->execlist_first = rb;
	}
	spin_unlock_irqrestore(&engine->timeline->lock, flags);

	if (submit)
		execlists_submit_ports(engine);
}

static bool execlists_elsp_idle(struct intel_engine_cs *engine)
{
	return !engine->execlist_port[0].request;
}

/**
 * intel_execlists_idle() - Determine if all engine submission ports are idle
 * @dev_priv: i915 device private
 *
 * Return true if there are no requests pending on any of the submission ports
 * of any engines.
 */
bool intel_execlists_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!i915.enable_execlists)
		return true;

	for_each_engine(engine, dev_priv, id) {
		/* Interrupt/tasklet pending? */
		if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
			return false;

		/* Both ports drained, no more ELSP submission? */
		if (!execlists_elsp_idle(engine))
			return false;
	}

	return true;
}

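/*
 * The HW accepts at most two ELSP entries in flight; treat the ports as
 * ready for a new submission only while the summed submission count is
 * below two (a lite restore bumps port[0].count to 2 on its own, see
 * execlists_submit_ports()).
 */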
static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
{
	const struct execlist_port *port = engine->execlist_port;

	return port[0].count + port[1].count < 2;
}

/*
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct execlist_port *port = engine->execlist_port;
	struct drm_i915_private *dev_priv = engine->i915;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
		u32 __iomem *csb_mmio =
			dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
		u32 __iomem *buf =
			dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
		unsigned int csb, head, tail;

		csb = readl(csb_mmio);
		head = GEN8_CSB_READ_PTR(csb);
		tail = GEN8_CSB_WRITE_PTR(csb);
		if (head == tail)
			break;

560 tail += GEN8_CSB_ENTRIES;
Chris Wilsona37951a2017-01-24 11:00:06 +0000561 do {
Chris Wilson70c2a242016-09-09 14:11:46 +0100562 unsigned int idx = ++head % GEN8_CSB_ENTRIES;
563 unsigned int status = readl(buf + 2 * idx);
Thomas Daniele981e7b2014-07-24 17:04:39 +0100564
Chris Wilson2ffe80a2017-02-06 17:05:02 +0000565 /* We are flying near dragons again.
566 *
567 * We hold a reference to the request in execlist_port[]
568 * but no more than that. We are operating in softirq
569 * context and so cannot hold any mutex or sleep. That
570 * prevents us stopping the requests we are processing
571 * in port[] from being retired simultaneously (the
572 * breadcrumb will be complete before we see the
573 * context-switch). As we only hold the reference to the
574 * request, any pointer chasing underneath the request
575 * is subject to a potential use-after-free. Thus we
576 * store all of the bookkeeping within port[] as
577 * required, and avoid using unguarded pointers beneath
578 * request itself. The same applies to the atomic
579 * status notifier.
580 */
581
Chris Wilson70c2a242016-09-09 14:11:46 +0100582 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
583 continue;
Thomas Daniele981e7b2014-07-24 17:04:39 +0100584
Chris Wilson86aa7e72017-01-23 11:31:32 +0000585 /* Check the context/desc id for this event matches */
Chris Wilsonae9a0432017-02-07 10:23:19 +0000586 GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) !=
587 port[0].context_id);
Chris Wilson86aa7e72017-01-23 11:31:32 +0000588
Chris Wilson70c2a242016-09-09 14:11:46 +0100589 GEM_BUG_ON(port[0].count == 0);
590 if (--port[0].count == 0) {
591 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
592 execlists_context_status_change(port[0].request,
593 INTEL_CONTEXT_SCHEDULE_OUT);
Thomas Daniele981e7b2014-07-24 17:04:39 +0100594
Chris Wilson70c2a242016-09-09 14:11:46 +0100595 i915_gem_request_put(port[0].request);
596 port[0] = port[1];
597 memset(&port[1], 0, sizeof(port[1]));
Chris Wilson70c2a242016-09-09 14:11:46 +0100598 }
Tvrtko Ursulinc6a2ac72016-02-26 16:58:32 +0000599
Chris Wilson70c2a242016-09-09 14:11:46 +0100600 GEM_BUG_ON(port[0].count == 0 &&
601 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
Chris Wilsona37951a2017-01-24 11:00:06 +0000602 } while (head < tail);
Tvrtko Ursulin26720ab2016-03-17 12:59:46 +0000603
Chris Wilson70c2a242016-09-09 14:11:46 +0100604 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
605 GEN8_CSB_WRITE_PTR(csb) << 8),
606 csb_mmio);
Tvrtko Ursulin26720ab2016-03-17 12:59:46 +0000607 }
608
Chris Wilson70c2a242016-09-09 14:11:46 +0100609 if (execlists_elsp_ready(engine))
610 execlists_dequeue(engine);
Tvrtko Ursulin26720ab2016-03-17 12:59:46 +0000611
Chris Wilson70c2a242016-09-09 14:11:46 +0100612 intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
Thomas Daniele981e7b2014-07-24 17:04:39 +0100613}
614
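/*
 * Requests are queued on an rbtree sorted by priority: higher priority
 * sorts first, while equal priorities keep FIFO order because equal keys
 * always descend to the right. For example (hypothetical priorities),
 * inserting requests with priorities 0, 2, 0 dequeues them as 2, 0, 0,
 * the two priority-0 requests staying in their original order.
 */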
static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
{
	struct rb_node **p, *rb;
	bool first = true;

	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	p = &root->rb_node;
	while (*p) {
		struct i915_priotree *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), node);
		if (pt->priority > pos->priority) {
			p = &rb->rb_left;
		} else {
			p = &rb->rb_right;
			first = false;
		}
	}
	rb_link_node(&pt->node, rb, p);
	rb_insert_color(&pt->node, root);

	return first;
}

static void execlists_submit_request(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	if (insert_request(&request->priotree, &engine->execlist_queue)) {
		engine->execlist_first = &request->priotree.node;
		if (execlists_elsp_ready(engine))
			tasklet_hi_schedule(&engine->irq_tasklet);
	}

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

static struct intel_engine_cs *
pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
{
	struct intel_engine_cs *engine;

	engine = container_of(pt,
			      struct drm_i915_gem_request,
			      priotree)->engine;
	if (engine != locked) {
		if (locked)
			spin_unlock_irq(&locked->timeline->lock);
		spin_lock_irq(&engine->timeline->lock);
	}

	return engine;
}

static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	LIST_HEAD(dfs);

	if (prio <= READ_ONCE(request->priotree.priority))
		return;

	/* Need BKL in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&request->i915->drm.struct_mutex);

	stack.signaler = &request->priotree;
	list_add(&stack.dfs_link, &dfs);

	/* Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_priotree *pt, prio) {
	 *	list_for_each_entry(dep, &pt->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	insert_request(pt);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order, the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
		struct i915_priotree *pt = dep->signaler;

		list_for_each_entry(p, &pt->signalers_list, signal_link)
			if (prio > READ_ONCE(p->signaler->priority))
				list_move_tail(&p->dfs_link, &dfs);

		list_safe_reset_next(dep, p, dfs_link);
		if (!RB_EMPTY_NODE(&pt->node))
			continue;

		engine = pt_lock_engine(pt, engine);

		/* If it is not already in the rbtree, we can update the
		 * priority in place and skip over it (and its dependencies)
		 * if it is referenced *again* as we descend the dfs.
		 */
		if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
			pt->priority = prio;
			list_del_init(&dep->dfs_link);
		}
	}

	/* Fifo and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		struct i915_priotree *pt = dep->signaler;

		INIT_LIST_HEAD(&dep->dfs_link);

		engine = pt_lock_engine(pt, engine);

		if (prio <= pt->priority)
			continue;

		GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));

		pt->priority = prio;
		rb_erase(&pt->node, &engine->execlist_queue);
		if (insert_request(pt, &engine->execlist_queue))
			engine->execlist_first = &pt->node;
	}

	if (engine)
		spin_unlock_irq(&engine->timeline->lock);

	/* XXX Do we need to preempt to make room for us and our deps? */
}

static int execlists_context_pin(struct intel_engine_cs *engine,
				 struct i915_gem_context *ctx)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	unsigned int flags;
	void *vaddr;
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	if (!ce->state) {
		ret = execlists_context_deferred_alloc(ctx, engine);
		if (ret)
			goto err;
	}
	GEM_BUG_ON(!ce->state);

	flags = PIN_GLOBAL | PIN_HIGH;
	if (ctx->ggtt_offset_bias)
		flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;

	ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_vma;
	}

	ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias);
	if (ret)
		goto unpin_map;

	intel_lr_context_descriptor_update(ctx, engine);

	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
		i915_ggtt_offset(ce->ring->vma);

	ce->state->obj->mm.dirty = true;

	i915_gem_context_get(ctx);
	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
	__i915_vma_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
}

static void execlists_context_unpin(struct intel_engine_cs *engine,
				    struct i915_gem_context *ctx)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	intel_ring_unpin(ce->ring);

	i915_gem_object_unpin_map(ce->state->obj);
	i915_vma_unpin(ce->state);

	i915_gem_context_put(ctx);
}

static int execlists_request_alloc(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_context *ce = &request->ctx->engine[engine->id];
	u32 *cs;
	int ret;

	GEM_BUG_ON(!ce->pin_count);

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	GEM_BUG_ON(!ce->ring);
	request->ring = ce->ring;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		ret = i915_guc_wq_reserve(request);
		if (ret)
			goto err;
	}

	cs = intel_ring_begin(request, 0);
	if (IS_ERR(cs)) {
		ret = PTR_ERR(cs);
		goto err_unreserve;
	}

	if (!ce->initialised) {
		ret = engine->init_context(request);
		if (ret)
			goto err_unreserve;

		ce->initialised = true;
	}

	/* Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;

err_unreserve:
	if (i915.enable_guc_submission)
		i915_guc_wq_unreserve(request);
err:
	return ret;
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct i915_workarounds *w = &req->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, w->count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

#define wa_ctx_emit(batch, index, cmd) \
	do { \
		int __index = (index)++; \
		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
			return -ENOSPC; \
		} \
		batch[__index] = (cmd); \
	} while (0)

#define wa_ctx_emit_reg(batch, index, reg) \
	wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
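/*
 * e.g. wa_ctx_emit(batch, index, MI_NOOP) appends one dword to the WA
 * batch, returning -ENOSPC from the calling function if the single page
 * backing the batch would overflow; wa_ctx_emit_reg() does the same but
 * emits the MMIO offset of an i915_reg_t.
 */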

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take the register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
						uint32_t *batch,
						uint32_t index)
{
	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, l3sqc4_flush);

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_DC_FLUSH_ENABLE));
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
	wa_ctx_emit(batch, index, 0);

	return index;
}

static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t offset,
				    uint32_t start_alignment)
{
	return wa_ctx->offset = ALIGN(offset, start_alignment);
}

static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
			     uint32_t offset,
			     uint32_t size_alignment)
{
	wa_ctx->size = offset - wa_ctx->offset;

	WARN(wa_ctx->size % size_alignment,
	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
	     wa_ctx->size, size_alignment);
	return 0;
}

/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them based
 * on a criteria. At the moment this batch always starts at the beginning of the
 * page and at this point we don't have multiple wa_ctx batch buffers.
 *
 * The number of WAs applied is not known at the beginning; we use this field
 * to return the number of DWORDs written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
 * makes a complete batch buffer.
 */
static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *batch,
				    uint32_t *offset)
{
Arun Siluvery0160f052015-06-23 15:46:57 +01001026 uint32_t scratch_addr;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001027 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1028
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001029 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001030 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001031
Arun Siluveryc82435b2015-06-19 18:37:13 +01001032 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
Chris Wilsonc0336662016-05-06 15:40:21 +01001033 if (IS_BROADWELL(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001034 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Andrzej Hajda604ef732015-09-21 15:33:35 +02001035 if (rc < 0)
1036 return rc;
1037 index = rc;
Arun Siluveryc82435b2015-06-19 18:37:13 +01001038 }
1039
Arun Siluvery0160f052015-06-23 15:46:57 +01001040 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1041 /* Actual scratch location is at 128 bytes offset */
Chris Wilsonbde13eb2016-08-15 10:49:07 +01001042 scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
Arun Siluvery0160f052015-06-23 15:46:57 +01001043
Arun Siluvery83b8a982015-07-08 10:27:05 +01001044 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1045 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1046 PIPE_CONTROL_GLOBAL_GTT_IVB |
1047 PIPE_CONTROL_CS_STALL |
1048 PIPE_CONTROL_QW_WRITE));
1049 wa_ctx_emit(batch, index, scratch_addr);
1050 wa_ctx_emit(batch, index, 0);
1051 wa_ctx_emit(batch, index, 0);
1052 wa_ctx_emit(batch, index, 0);
Arun Siluvery0160f052015-06-23 15:46:57 +01001053
Arun Siluvery17ee9502015-06-19 19:07:01 +01001054 /* Pad to end of cacheline */
1055 while (index % CACHELINE_DWORDS)
Arun Siluvery83b8a982015-07-08 10:27:05 +01001056 wa_ctx_emit(batch, index, MI_NOOP);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001057
1058 /*
1059 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1060 * execution depends on the length specified in terms of cache lines
1061 * in the register CTX_RCS_INDIRECT_CTX
1062 */
1063
1064 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1065}
1066
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001067/*
1068 * This batch is started immediately after indirect_ctx batch. Since we ensure
1069 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
Arun Siluvery17ee9502015-06-19 19:07:01 +01001070 *
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001071 * The number of DWORDS written are returned using this field.
Arun Siluvery17ee9502015-06-19 19:07:01 +01001072 *
1073 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1074 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1075 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001076static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001077 struct i915_wa_ctx_bb *wa_ctx,
Daniel Vetter6e5248b2016-07-15 21:48:06 +02001078 uint32_t *batch,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001079 uint32_t *offset)
1080{
1081 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1082
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001083 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001084 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001085
Arun Siluvery83b8a982015-07-08 10:27:05 +01001086 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001087
1088 return wa_ctx_end(wa_ctx, *offset = index, 1);
1089}
1090
static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *batch,
				    uint32_t *offset)
{
	int ret;
	struct drm_i915_private *dev_priv = engine->i915;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
	ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
	if (ret < 0)
		return ret;
	index = ret;

	/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
	wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
			    GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
	wa_ctx_emit(batch, index, MI_NOOP);

	/* WaClearSlmSpaceAtContextSwitch:kbl */
	/* Actual scratch location is at 128 bytes offset */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
		u32 scratch_addr =
			i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;

		wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
		wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
					   PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_CS_STALL |
					   PIPE_CONTROL_QW_WRITE));
		wa_ctx_emit(batch, index, scratch_addr);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
	}

	/* WaMediaPoolStateCmdInWABB:bxt,glk */
	if (HAS_POOLED_EU(engine->i915)) {
		/*
		 * EU pool configuration is set up along with the golden
		 * context during context initialization. This value depends
		 * on the device type (2x6 or 3x6) and needs to be updated
		 * based on which subslice is disabled, especially for 2x6
		 * devices. However, it is safe to load the default 3x6
		 * configuration instead of masking off the corresponding
		 * bits, because HW ignores the bits of a disabled subslice
		 * and drops down to the appropriate config. Please see
		 * render_state_setup() in i915_gem_render_state.c for the
		 * possible configurations; to avoid duplication they are
		 * not shown here again.
		 */
		u32 eu_pool_config = 0x00777000;
		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
		wa_ctx_emit(batch, index, eu_pool_config);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
	}

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

	engine->wa_ctx.vma = vma;
	return 0;

err:
	i915_gem_object_put(obj);
	return err;
}

static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->wa_ctx.vma);
}

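/*
 * A rough sketch of how the single WA page built below ends up laid out
 * (an illustration derived from the builders above, not quoted from the
 * spec): the indirect-context batch starts at offset 0 and is padded to a
 * cacheline; the per-context batch follows on the next cacheline and is
 * terminated by MI_BATCH_BUFFER_END.
 *
 *	+-------------------------------+ offset 0
 *	| indirect_ctx batch		|
 *	| ... MI_NOOP padding		|
 *	+-------------------------------+ cacheline aligned
 *	| per_ctx batch			|
 *	| MI_BATCH_BUFFER_END		|
 *	+-------------------------------+
 *	| unused remainder of the page	|
 *	+-------------------------------+ PAGE_SIZE
 *
 * The offsets recorded by wa_ctx_start()/wa_ctx_end() are what later get
 * written into CTX_RCS_INDIRECT_CTX and CTX_BB_PER_CTX_PTR in
 * execlists_init_reg_state().
 */
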
static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
	uint32_t *batch;
	uint32_t offset;
	struct page *page;
	int ret;

	WARN_ON(engine->id != RCS);

	/* Update this when WAs for higher gens are added */
	if (INTEL_GEN(engine->i915) > 9) {
		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
			  INTEL_GEN(engine->i915));
		return 0;
	}

	/* Some WAs perform writes to the scratch page, ensure it is valid */
	if (!engine->scratch) {
		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
		return -EINVAL;
	}

	ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
	batch = kmap_atomic(page);
	offset = 0;

	if (IS_GEN8(engine->i915)) {
		ret = gen8_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen8_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	} else if (IS_GEN9(engine->i915)) {
		ret = gen9_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen9_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	}

out:
	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx_obj(engine);

	return ret;
}

static u32 port_seqno(struct execlist_port *port)
{
	return port->request ? port->request->global_seqno : 0;
}

static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = intel_mocs_init_engine(engine);
	if (ret)
		return ret;

	intel_engine_reset_breadcrumbs(engine);
	intel_engine_init_hangcheck(engine);

	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
	I915_WRITE(RING_MODE_GEN7(engine),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
		   engine->status_page.ggtt_offset);
	POSTING_READ(RING_HWS_PGA(engine->mmio_base));

	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);

	/* After a GPU reset, we may have requests to replay */
	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
	if (!execlists_elsp_idle(engine)) {
		DRM_DEBUG_DRIVER("Restarting %s from requests [0x%x, 0x%x]\n",
				 engine->name,
				 port_seqno(&engine->execlist_port[0]),
				 port_seqno(&engine->execlist_port[1]));
		engine->execlist_port[0].count = 0;
		engine->execlist_port[1].count = 0;
		execlists_submit_ports(engine);
	}

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return init_workarounds_ring(engine);
}

static int gen9_init_render_ring(struct intel_engine_cs *engine)
{
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	return init_workarounds_ring(engine);
}

static void reset_common_ring(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	struct execlist_port *port = engine->execlist_port;
	struct intel_context *ce;

	/* If the request was innocent, we leave the request in the ELSP
	 * and will try to replay it on restarting. The context image may
	 * have been corrupted by the reset, in which case we may have
	 * to service a new GPU hang, but more likely we can continue on
	 * without impact.
	 *
	 * If the request was guilty, we presume the context is corrupt
	 * and have to at least restore the RING register in the context
	 * image back to the expected values to skip over the guilty request.
	 */
	if (!request || request->fence.error != -EIO)
		return;

	/* We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	ce = &request->ctx->engine[engine->id];
	execlists_init_reg_state(ce->lrc_reg_state,
				 request->ctx, engine, ce->ring);

	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
		i915_ggtt_offset(ce->ring->vma);
	ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;

	request->ring->head = request->postfix;
	request->ring->last_retired_head = -1;
	intel_ring_update_space(request->ring);

	if (i915.enable_guc_submission)
		return;

	/* Catch up with any missed context-switch interrupts */
	if (request->ctx != port[0].request->ctx) {
		i915_gem_request_put(port[0].request);
		port[0] = port[1];
		memset(&port[1], 0, sizeof(port[1]));
	}

	GEM_BUG_ON(request->ctx != port[0].request->ctx);

	/* Reset WaIdleLiteRestore:bdw,skl as well */
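	/*
	 * Illustration of the arithmetic below (assuming WA_TAIL_DWORDS,
	 * defined earlier in this file, is 2): gen8_emit_wa_tail() appended
	 * two MI_NOOPs after the breadcrumb and recorded the post-NOOP
	 * offset in wa_tail, so stepping back WA_TAIL_DWORDS * sizeof(u32)
	 * bytes re-points tail at those NOOPs, letting a lite-restore
	 * resubmission advance TAIL without replaying the guilty batch.
	 */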
	request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
}

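/*
 * For illustration (derived from the code below, not from the spec), with
 * GEN8_LEGACY_PDPES == 4 the command stream built by
 * intel_logical_ring_emit_pdps() is:
 *
 *	MI_LOAD_REGISTER_IMM(8)
 *	GEN8_RING_PDP_UDW(engine, 3)	upper_32_bits(pd_daddr[3])
 *	GEN8_RING_PDP_LDW(engine, 3)	lower_32_bits(pd_daddr[3])
 *	...
 *	GEN8_RING_PDP_UDW(engine, 0)	upper_32_bits(pd_daddr[0])
 *	GEN8_RING_PDP_LDW(engine, 0)	lower_32_bits(pd_daddr[0])
 *	MI_NOOP
 *
 * i.e. num_lri_cmds * 2 + 2 dwords in total, matching the ring_begin
 * reservation.
 */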
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
	struct intel_engine_cs *engine = req->engine;
	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
	u32 *cs;
	int i;

	cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
		*cs++ = upper_32_bits(pd_daddr);
		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
		*cs++ = lower_32_bits(pd_daddr);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

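/*
 * Sketch of the 4-dword batch-buffer-start emitted below (derived from
 * the code rather than quoted from bspec):
 *
 *	MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) [| MI_BATCH_RESOURCE_STREAMER]
 *	lower_32_bits(offset)
 *	upper_32_bits(offset)
 *	MI_NOOP
 *
 * The ppgtt bit selects PPGTT vs GGTT addressing; it is clear only for
 * I915_DISPATCH_SECURE batches.
 */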
static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned int dispatch_flags)
{
	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
	u32 *cs;
	int ret;

	/* Don't rely on the hw updating the PDPs, especially in lite-restore.
	 * Ideally, we should set Force PD Restore in the ctx descriptor,
	 * but we can't. Force Restore would be a second option, but
	 * it is unsafe in case of lite-restore (because the ctx is
	 * not idle). PML4 is allocated during ppgtt init, so this is
	 * not needed in 48-bit.
	 */
	if (req->ctx->ppgtt &&
	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
		if (!i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
		    !intel_vgpu_active(req->i915)) {
			ret = intel_logical_ring_emit_pdps(req);
			if (ret)
				return ret;
		}

		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* FIXME(BDW): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}

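/*
 * The flush emitted below is a single MI_FLUSH_DW packet; for reference,
 * its four dwords as assembled here (an illustration derived from the
 * code) are:
 *
 *	cmd	(MI_FLUSH_DW + 1, plus store-index/invalidate flags)
 *	I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT
 *	0	(upper address)
 *	0	(value)
 *
 * The post-sync write to the HWS scratch slot is what provides the
 * command barrier described in the comment below.
 */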
static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(request, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (request->engine->id == VCS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(request, cs);

	return 0;
}

static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
				  u32 mode)
{
	struct intel_engine_cs *engine = request->engine;
	u32 scratch_addr =
		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (IS_GEN9(request->i915))
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
			dc_flush_wa = true;
	}

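	/*
	 * Dword budget (from the emission below): the main PIPE_CONTROL is
	 * 6 dwords; the gen9 VF-flush WA prepends one more 6-dword NULL
	 * PIPE_CONTROL; the kbl DC-flush WA wraps the main packet in two
	 * further 6-dword PIPE_CONTROLs (12 in total).
	 */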
	len = 6;

	if (vf_flush_wa)
		len += 6;

	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(request, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa) {
		*cs++ = GFX_OP_PIPE_CONTROL(6);
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
	}

	if (dc_flush_wa) {
		*cs++ = GFX_OP_PIPE_CONTROL(6);
		*cs++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	if (dc_flush_wa) {
		*cs++ = GFX_OP_PIPE_CONTROL(6);
		*cs++ = PIPE_CONTROL_CS_STALL;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
	}

	intel_ring_advance(request, cs);

	return 0;
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
{
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	request->wa_tail = intel_ring_offset(request, cs);
}

static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = request->global_seqno;
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;
	request->tail = intel_ring_offset(request, cs);

	gen8_emit_wa_tail(request, cs);
}

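/*
 * The sizes below must match what the emitters actually write:
 * gen8_emit_breadcrumb() emits 6 dwords before gen8_emit_wa_tail() adds
 * its WA_TAIL_DWORDS of padding, hence 6 + WA_TAIL_DWORDS; the render
 * variant just below emits an 8-dword PIPE_CONTROL-based breadcrumb.
 */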
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;

static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
					u32 *cs)
{
	/* We're using qword write, seqno should be aligned to 8 bytes. */
	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = intel_hws_seqno_address(request->engine);
	*cs++ = 0;
	*cs++ = request->global_seqno;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;
	request->tail = intel_ring_offset(request, cs);

	gen8_emit_wa_tail(request, cs);
}

static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;

static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_logical_ring_workarounds_emit(req);
	if (ret)
		return ret;

	ret = intel_rcs_context_init_mocs(req);
	/*
	 * Failing to program the MOCS is non-fatal: the system will not
	 * run at peak performance. So generate an error and carry on.
	 */
	if (ret)
		DRM_ERROR("MOCS failed to program: expect performance issues.\n");

	return i915_gem_render_state_emit(req);
}

/**
 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
 * @engine: Engine Command Streamer.
 */
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	/*
	 * Tasklet cannot be active at this point due to intel_mark_active/idle
	 * so this is just for documentation.
	 */
	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
		tasklet_kill(&engine->irq_tasklet);

	dev_priv = engine->i915;

	if (engine->buffer)
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

	if (engine->cleanup)
		engine->cleanup(engine);

	if (engine->status_page.vma) {
		i915_gem_object_unpin_map(engine->status_page.vma->obj);
		engine->status_page.vma = NULL;
	}

	intel_engine_cleanup_common(engine);

	lrc_destroy_wa_ctx_obj(engine);
	engine->i915 = NULL;
	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
}

void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		engine->submit_request = execlists_submit_request;
		engine->schedule = execlists_schedule;
	}
}

static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */
	engine->init_hw = gen8_init_common_ring;
	engine->reset_hw = reset_common_ring;

	engine->context_pin = execlists_context_pin;
	engine->context_unpin = execlists_context_unpin;

	engine->request_alloc = execlists_request_alloc;

	engine->emit_flush = gen8_emit_flush;
	engine->emit_breadcrumb = gen8_emit_breadcrumb;
	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
	engine->submit_request = execlists_submit_request;
	engine->schedule = execlists_schedule;

	engine->irq_enable = gen8_logical_ring_enable_irq;
	engine->irq_disable = gen8_logical_ring_disable_irq;
	engine->emit_bb_start = gen8_emit_bb_start;
}

static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine)
{
	unsigned shift = engine->irq_shift;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}

static int
lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
{
	const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
	void *hws;

	/* The HWSP is part of the default context object in LRC mode. */
	hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(hws))
		return PTR_ERR(hws);

	engine->status_page.page_addr = hws + hws_offset;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
	engine->status_page.vma = vma;

	return 0;
}

static void
logical_ring_setup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	enum forcewake_domains fw_domains;

	intel_engine_setup_common(engine);

	/* Intentionally left blank. */
	engine->buffer = NULL;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
						    RING_ELSP(engine),
						    FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_PTR(engine),
						     FW_REG_READ | FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_BUF_BASE(engine),
						     FW_REG_READ);

	engine->fw_domains = fw_domains;

	tasklet_init(&engine->irq_tasklet,
		     intel_lrc_irq_handler, (unsigned long)engine);

	logical_ring_default_vfuncs(engine);
	logical_ring_default_irqs(engine);
}

static int
logical_ring_init(struct intel_engine_cs *engine)
{
	struct i915_gem_context *dctx = engine->i915->kernel_context;
	int ret;

	ret = intel_engine_init_common(engine);
	if (ret)
		goto error;

	/* And setup the hardware status page. */
	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
	if (ret) {
		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
		goto error;
	}

	return 0;

error:
	intel_logical_ring_cleanup(engine);
	return ret;
}

int logical_render_ring_init(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	logical_ring_setup(engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	/* Override some for render ring. */
	if (INTEL_GEN(dev_priv) >= 9)
		engine->init_hw = gen9_init_render_ring;
	else
		engine->init_hw = gen8_init_render_ring;
	engine->init_context = gen8_init_rcs_context;
	engine->emit_flush = gen8_emit_flush_render;
	engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;

	ret = intel_engine_create_scratch(engine, PAGE_SIZE);
	if (ret)
		return ret;

	ret = intel_init_workaround_bb(engine);
	if (ret) {
		/*
		 * We continue even if we fail to initialize the WA batch,
		 * because we only expect rare glitches but nothing
		 * critical enough to prevent us from using the GPU.
		 */
		DRM_ERROR("WA batch buffer initialization failed: %d\n",
			  ret);
	}

	return logical_ring_init(engine);
}

int logical_xcs_ring_init(struct intel_engine_cs *engine)
{
	logical_ring_setup(engine);

	return logical_ring_init(engine);
}

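/*
 * Worked example for make_rpcs() below (a hypothetical part, for
 * illustration only): a gen9 device with sseu.slice_mask = 0x1,
 * sseu.subslice_mask = 0x7 and eu_per_subslice = 8, with all three
 * power-gating flags set, would yield
 *
 *	rpcs = GEN8_RPCS_ENABLE |
 *	       GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT)  |
 *	       GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT) |
 *	       (8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT)
 *
 * since hweight8(0x1) == 1 and hweight8(0x7) == 3.
 */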
static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
	u32 rpcs = 0;

	/*
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	/*
	 * Starting in Gen9, render power gating can leave
	 * slice/subslice/EU in a partially enabled state. We
	 * must make an explicit request through RPCS for full
	 * enablement.
	 */
	if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
		rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
			GEN8_RPCS_S_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
		rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
			GEN8_RPCS_SS_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
		rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
			GEN8_RPCS_EU_MIN_SHIFT;
		rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
			GEN8_RPCS_EU_MAX_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	return rpcs;
}

static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
	u32 indirect_ctx_offset;

	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 9:
		indirect_ctx_offset =
			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset =
			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
}

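/*
 * Sketch of the register-state layout written by execlists_init_reg_state()
 * below. CTX_FOO here is a hypothetical placeholder for the CTX_* indices
 * defined elsewhere in the driver, shown only for orientation:
 *
 *	reg_state[CTX_LRI_HEADER_0]	MI_LOAD_REGISTER_IMM(N)
 *	reg_state[CTX_FOO + 0]		mmio offset of register FOO
 *	reg_state[CTX_FOO + 1]		value to load into FOO
 *	...
 *
 * ASSIGN_CTX_REG() fills in one such (offset, value) pair, which is why
 * the fixups elsewhere in this file poke at reg_state[CTX_RING_HEAD+1]
 * and friends: the +1 selects the value dword of the pair.
 */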
static void execlists_init_reg_state(u32 *reg_state,
				     struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine,
				     struct intel_ring *ring)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;

	/* A context is actually a big batch buffer with several
	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
	 * values we are setting here are only for the first context restore:
	 * on a subsequent save, the GPU will recreate this batchbuffer with new
	 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
	 * we are not initializing here).
	 */
	reg_state[CTX_LRI_HEADER_0] =
		MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
		       RING_CONTEXT_CONTROL(engine),
		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					  (HAS_RESOURCE_STREAMER(dev_priv) ?
					   CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
		       RING_CTL_SIZE(ring->size) | RING_VALID);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
		       RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
		       RING_SBBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
		       RING_SBBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
		       RING_SBBSTATE(engine->mmio_base), 0);
	if (engine->id == RCS) {
		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
			       RING_INDIRECT_CTX(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
		if (engine->wa_ctx.vma) {
			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

			reg_state[CTX_RCS_INDIRECT_CTX+1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
				intel_lr_indirect_ctx_offset(engine) << 6;

			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
		}
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
		       0);

	if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	}

	if (engine->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
			       make_rpcs(dev_priv));
	}
}

static int
populate_lr_context(struct i915_gem_context *ctx,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ring *ring)
{
	void *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}
	ctx_obj->mm.dirty = true;

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution.
	 */
	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
				 ctx, engine, ring);

	i915_gem_object_unpin_map(ctx_obj);

	return 0;
}

/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
	int ret = 0;

	WARN_ON(INTEL_GEN(engine->i915) < 8);

	switch (engine->id) {
	case RCS:
		if (INTEL_GEN(engine->i915) >= 9)
			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
		else
			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

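/*
 * Putting the pieces together, the context object allocated below is laid
 * out page-wise roughly as follows (assuming LRC_PPHWSP_PN == 1, as its
 * name and the sizing arithmetic in this function suggest; an
 * illustration, not quoted from the spec):
 *
 *	page 0			shared data page (used only for GuC submission)
 *	page LRC_PPHWSP_PN	per-process HWSP (see lrc_setup_hws())
 *	page LRC_STATE_PN...	register state (see populate_lr_context())
 */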
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct intel_context *ce = &ctx->engine[engine->id];
	struct i915_vma *vma;
	uint32_t context_size;
	struct intel_ring *ring;
	int ret;

	WARN_ON(ce->state);

	context_size = round_up(intel_lr_context_size(engine),
				I915_GTT_PAGE_SIZE);

	/* One extra page as the sharing data between driver and GuC */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
	if (IS_ERR(ctx_obj)) {
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
		return PTR_ERR(ctx_obj);
	}

	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto error_deref_obj;
	}

	ring = intel_engine_create_ring(engine, ctx->ring_size);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ring_free;
	}

	ce->ring = ring;
	ce->state = vma;
	ce->initialised = engine->init_context == NULL;

	return 0;

error_ring_free:
	intel_ring_free(ring);
error_deref_obj:
	i915_gem_object_put(ctx_obj);
	return ret;
}

void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;

	/* Because we emit WA_TAIL_DWORDS there may be a disparity
	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
	 * that stored in context. As we only write new commands from
	 * ce->ring->tail onwards, everything before that is junk. If the GPU
	 * starts reading from its RING_HEAD from the context, it may try to
	 * execute that junk and die.
	 *
	 * So to avoid that we reset the context images upon resume. For
	 * simplicity, we just zero everything out.
	 */
	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];
			u32 *reg;

			if (!ce->state)
				continue;

			reg = i915_gem_object_pin_map(ce->state->obj,
						      I915_MAP_WB);
			if (WARN_ON(IS_ERR(reg)))
				continue;

			reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
			reg[CTX_RING_HEAD+1] = 0;
			reg[CTX_RING_TAIL+1] = 0;

			ce->state->obj->mm.dirty = true;
			i915_gem_object_unpin_map(ce->state->obj);

			ce->ring->head = ce->ring->tail = 0;
			ce->ring->last_retired_head = -1;
			intel_ring_update_space(ce->ring);
		}
	}
}