/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
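
/*
 * Illustrative arithmetic for the sizes above, assuming 4KiB pages:
 * 66944 bytes / 4096 = 16.34..., so HSW needs 17 pages (69632 bytes of
 * allocation), and the 72064 bytes on gen8 likewise round up to 18 pages.
 */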

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};

struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	u32 mmio_base;
	unsigned irq_shift;
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return i915_modparams.enable_execlists ?
			       GEN8_LR_CONTEXT_RENDER_SIZE :
			       GEN8_CXT_TOTAL_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
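
/*
 * Worked example for the gen6/gen7 branch above (hypothetical register
 * value): if GEN7_CXT_TOTAL_SIZE(cxt_size) decoded to 1046 units of 64
 * bytes, the image would need 1046 * 64 = 66944 bytes, which
 * round_up(..., PAGE_SIZE) turns into 17 pages - matching the value
 * hard-coded as HSW_CXT_TOTAL_SIZE.
 */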

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	const struct engine_class_info *class_info;
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
	class_info = &intel_engine_classes[info->class];

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
			 class_info->name, info->instance) >=
		sizeof(engine->name));
	engine->uabi_id = info->uabi_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;
	engine->class = info->class;
	engine->instance = info->instance;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine[id] = engine;
	return 0;
}
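
/*
 * Naming example: VCS2 has class name "vcs" and instance 1, so the
 * snprintf() above produces "vcs1", while the instance-0 engines come out
 * as "rcs0", "bcs0", "vcs0" and "vecs0".
 */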

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}
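
/*
 * For example (illustrative), on a part with VCS2 fused off the loop above
 * accumulates mask = ENGINE_MASK(RCS) | ENGINE_MASK(BCS) | ENGINE_MASK(VCS) |
 * ENGINE_MASK(VECS), and num_rings = hweight32(mask) = 4.
 */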

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (i915_modparams.enable_execlists)
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap_atomic(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap_atomic(semaphores);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}
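
/*
 * Illustrative failure mode avoided above: if the seqno were rewound from,
 * say, 1000 to 1 while a stale semaphore value of 1000 remained in the
 * sync register, a subsequent wait for seqno 2 would see 1000 > 2 and
 * complete immediately, before the work had actually been done.
 */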

static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/*
	 * IOMMU adds unpredictable latency causing the CSB write (from the
	 * GPU into the HWSP) to only be visible some time after the interrupt
	 * (missed breadcrumb syndrome).
	 */
	if (intel_vtd_active())
		return true;

	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	intel_engine_init_execlist(engine);

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	ret = i915_gem_render_state_init(engine);
	if (ret)
		goto err_breadcrumbs;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_rs_fini;

	return 0;

err_rs_fini:
	i915_gem_render_state_fini(engine);
err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
		engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	i915_gem_render_state_fini(engine);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
}

u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}
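
/*
 * For example, reading GEN7_ROW_INSTDONE for slice 1 / subslice 2 steers
 * the MCR selector with GEN8_MCR_SLICE(1) | GEN8_MCR_SUBSLICE(2), samples
 * the register, then clears the selector back to 0 as the HW expects.
 */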

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
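
/*
 * Masked registers keep their write-enable bits in the upper 16 bits: e.g.
 * _MASKED_BIT_ENABLE(BIT(0)) expands to (BIT(16) | BIT(0)), where the high
 * half selects which low bits the write touches. The helpers above thus
 * build a single LRI-friendly value that both selects and sets (or clears)
 * the workaround bit.
 */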

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		   i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   ECOCHK_DIS_TLB);

	if (HAS_LLC(dev_priv)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);

		I915_WRITE(MMCD_MISC_CTRL,
			   I915_READ(MMCD_MISC_CTRL) |
			   MMCD_PCLA |
			   MMCD_HOTSPOT_EN);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}
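
/*
 * Worked example with hypothetical fuse data: if slice 0 has exactly one
 * 7-EU subslice and it is subslice 2, then subslice_7eu[0] == BIT(2),
 * ss = ffs(BIT(2)) - 1 = 2 and vals[0] = 3 - 2 = 1, which is the value
 * programmed into the slice-0 GEN9_IZ_HASHING field above.
 */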

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		u32 val = I915_READ(GEN8_L3SQCREG1);

		val &= ~L3_PRIO_CREDITS_MASK;
		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
		I915_WRITE(GEN8_L3SQCREG1, val);
	}

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

static int cnl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaInPlaceDecompressionHang:cnl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* ReadHitWriteOnlyDisable:cnl */
	WA_SET_BIT_MASKED(SLICE_UNIT_LEVEL_CLKGATE, RCCUNIT_CLKGATE_DIS);

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}
1335
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +00001336static int kbl_init_workarounds(struct intel_engine_cs *engine)
1337{
1338 struct drm_i915_private *dev_priv = engine->i915;
1339 int ret;
1340
1341 ret = gen9_init_workarounds(engine);
1342 if (ret)
1343 return ret;
1344
1345 /* WaEnableGapsTsvCreditFix:kbl */
1346 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1347 GEN9_GAPS_TSV_CREDIT_DISABLE));
1348
1349 /* WaDisableDynamicCreditSharing:kbl */
1350 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
Oscar Mateoc6ea497c2017-09-07 08:40:08 -07001351 I915_WRITE(GAMT_CHKN_BIT_REG,
1352 (I915_READ(GAMT_CHKN_BIT_REG) |
1353 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +00001354
1355 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1356 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1357 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1358 HDC_FENCE_DEST_SLM_DISABLE);
1359
1360 /* WaToEnableHwFixForPushConstHWBug:kbl */
1361 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1362 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1363 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1364
1365 /* WaDisableGafsUnitClkGating:kbl */
Oscar Mateo4827c542017-09-07 08:40:07 -07001366 I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
1367 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +00001368
1369 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1370 WA_SET_BIT_MASKED(
1371 GEN7_HALF_SLICE_CHICKEN1,
1372 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1373
1374 /* WaInPlaceDecompressionHang:kbl */
Oscar Mateoefc886c2017-09-07 08:40:04 -07001375 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
1376 (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
1377 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +00001378
1379 /* WaDisableLSQCROPERFforOCL:kbl */
1380 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1381 if (ret)
1382 return ret;
1383
1384 return 0;
1385}
1386
static int glk_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	return 0;
}

static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:cfl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:cfl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_init_workarounds(engine);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_init_workarounds(engine);
	else
		err = 0;
	if (err)
		return err;

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
	return 0;
}

int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct i915_workarounds *w = &req->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, w->count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

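/*
 * For illustration only: with two recorded workarounds, the emission
 * above places the following dwords in the ring, bracketed by the two
 * EMIT_BARRIER flushes:
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	<mmio offset of reg[0]>  <reg[0].value>
 *	<mmio offset of reg[1]>  <reg[1].value>
 *	MI_NOOP
 *
 * i.e. w->count * 2 + 2 dwords, exactly the space reserved by the
 * intel_ring_begin() call.
 */
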
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	intel_runtime_pm_get(dev_priv);

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Interrupt/tasklet pending? */
	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
		return false;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))
		return false;

	/* ELSP is empty, but there are ready requests? */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

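/*
 * Illustrative sketch, not driver code: callers that need the engine to
 * actually reach idle typically poll this check with a timeout, e.g.
 *
 *	if (wait_for(intel_engine_is_idle(engine), 10))
 *		DRM_ERROR("%s failed to idle\n", engine->name);
 *
 * which is the pattern intel_engines_park() uses below before putting
 * the engines to sleep.
 */
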
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (READ_ONCE(dev_priv->gt.active_requests))
		return false;

	/* If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	return (!engine->last_retired_context ||
		i915_gem_context_is_kernel(engine->last_retired_context));
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.irq_tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p);
		}

		if (engine->park)
			engine->park(engine);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

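/*
 * Illustrative sketch, assumed caller (not part of this file): parking is
 * normally driven from the GT idle worker once no requests remain active,
 * along the lines of
 *
 *	if (!dev_priv->gt.active_requests)
 *		intel_engines_park(dev_priv);
 *
 * with intel_engines_unpark() below as its counterpart when new work
 * arrives.
 */
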
/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->unpark)
			engine->unpark(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

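/*
 * Illustrative sketch, hypothetical caller: dword stores to memory are
 * unreliable on the platforms excluded above, so a selftest building such
 * a batch would typically guard it with
 *
 *	if (!intel_engine_can_store_dword(engine))
 *		return 0; /- skip: dword stores are broken here -/
 *
 * before emitting MI_STORE_DWORD_IMM into the batch buffer.
 */
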
static void print_request(struct drm_printer *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno,
		   i915_gem_request_completed(rq) ? "!" : "",
		   rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
{
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *rq;
	struct rb_node *rb;
	u64 addr;

	drm_printf(m, "%s\n", engine->name);
	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
		   engine->timeline->inflight_seqnos);
	drm_printf(m, "\tReset count: %d\n",
		   i915_reset_engine_count(error, engine));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline->requests,
			      struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline->requests,
			     struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");
		drm_printf(m,
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
	}

	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
		   I915_READ(RING_START(engine->mmio_base)),
		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
	drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
		   rq ? rq->ring->head : 0);
	drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
		   rq ? rq->ring->tail : 0);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	rcu_read_unlock();

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));

	if (i915_modparams.enable_execlists) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				drm_printf(m, "\t\tELSP[%d] count=%d, ",
					   idx, count);
				print_request(m, rq, "rq: ");
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n",
					   idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry(rq, &engine->timeline->requests, link)
		print_request(m, rq, "\t\tE ");
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, priotree.link)
			print_request(m, rq, "\t\tQ ");
	}
	spin_unlock_irq(&engine->timeline->lock);

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
	drm_printf(m, "\n");
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif