/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register; however, it does not appear to be
 * valid. The docs explain, in dwords, what is in the context object. The full
 * size is 70720 bytes; however, the power context and execlist context will
 * never be saved (the power context is stored elsewhere, and execlists don't
 * work on HSW) - so the final size, including the extra state required for
 * the Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
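		/* unknown classes fall through to the common sizing below */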
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
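
/*
 * Worked example (editor's note): with 4K pages, a gen9 render context image
 * is GEN9_LR_CONTEXT_RENDER_SIZE = 22 * 4096 = 90112 bytes, whereas gen6/7
 * (other than Haswell) read the size from the CXT_SIZE registers, scale by
 * 64 (presumably 64-byte cachelines) and round up to whole pages.
 */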

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}
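
/*
 * Example (editor's note): the tables are sorted in reverse gen order, so the
 * loop stops at the first entry old enough for the running device. For VCS on
 * a gen9 part, { {11, GEN11_BSD_RING_BASE}, {6, GEN6_BSD_RING_BASE},
 * {4, BSD_RING_BASE} } skips the gen11 entry (9 < 11) and selects
 * GEN6_BSD_RING_BASE (9 >= 6).
 */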

static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}
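
/*
 * The "%s%u" format above yields names such as "rcs0", "bcs0" or "vcs1" (the
 * second video decode engine); the WARN_ON fires if a name would have been
 * truncated to fit INTEL_ENGINE_CS_MAX_NAME.
 */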

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	spin_lock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}
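
/*
 * Editor's note: after setup an engine is reachable two ways: by global id
 * via dev_priv->engine[id], and by (class, instance) via
 * dev_priv->engine_class[class][instance], matching the gen11 class/instance
 * widths checked by the BUILD_BUG_ONs above.
 */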

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno, as long as we
	 * also reset the tracking semaphore value to 0, it will always be
	 * before the next request's seqno. If we don't reset the semaphore
	 * value, then when the seqno moves backwards all future waits will
	 * complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/*
	 * IOMMU adds unpredictable latency causing the CSB write (from the
	 * GPU into the HWSP) to only be visible some time after the interrupt
	 * (missed breadcrumb syndrome).
	 */
	if (intel_vtd_active())
		return true;

	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}
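
/*
 * In short (editor's note): the context status buffer is read over MMIO
 * instead of from the HWSP whenever VT-d is active, or when running under an
 * older GVT hypervisor that cannot emulate HWSP-based CSB reads.
 */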

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}
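
/*
 * Editor's note: port_mask = 1 gives two execlist submission ports (assuming
 * execlists_num_ports() returns port_mask + 1, a power of two per the
 * BUILD_BUG_ON above), and queue_priority = INT_MIN means an idle engine
 * accepts work of any priority.
 */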

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	intel_engine_init_execlist(engine);
	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (engine->i915->preempt_context) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr_slice_subslice_mask;
	uint32_t mcr_slice_subslice_select;
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~mcr_slice_subslice_mask;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
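
/*
 * Editor's sketch of how these expand: masked registers carry a per-bit
 * write-enable in the upper 16 bits, so (assuming the usual i915 masked
 * register helpers)
 *
 *	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING)
 *
 * records { .addr = INSTPM, .mask = INSTPM_FORCE_ORDERING,
 * .value = _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING) } in
 * dev_priv->workarounds, to be applied to the hardware and verified
 * elsewhere in the driver.
 */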

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		   i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}
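
/*
 * Editor's note: each RING_FORCE_TO_NONPRIV slot demotes one privileged
 * register so that unprivileged userspace batches may write it;
 * RING_MAX_NONPRIV_SLOTS bounds how many registers an engine can whitelist
 * this way.
 */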

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   ECOCHK_DIS_TLB);

	if (HAS_LLC(dev_priv)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);

		I915_WRITE(MMCD_MISC_CTRL,
			   I915_READ(MMCD_MISC_CTRL) |
			   MMCD_PCLA |
			   MMCD_HOTSPOT_EN);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs, so we play it safe
	 * and mimic skl behaviour regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
	if (IS_GEN9_LP(dev_priv)) {
		u32 val = I915_READ(GEN8_L3SQCREG1);

		val &= ~L3_PRIO_CREDITS_MASK;
		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
		I915_WRITE(GEN8_L3SQCREG1, val);
	}

	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}
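
/*
 * Worked example (editor's note): if slice 0 reports subslice_7eu[0] = 0b100,
 * exactly one subslice has 7 EUs, so ss = ffs(0b100) - 1 = 2 and
 * vals[0] = 3 - 2 = 1 is programmed into the slice 0 IZ hashing field;
 * slices with zero or several 7-EU subslices fail the is_power_of_2() test
 * and keep the default of 0.
 */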

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	I915_WRITE(FF_SLICE_CS_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

static int cnl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaInPlaceDecompressionHang:cnl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}
1414
static int glk_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
	if (ret)
		return ret;

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	return 0;
}

static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:cfl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:cfl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

	if (GEM_WARN_ON(engine->id != RCS))
		return -EINVAL;

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_init_workarounds(engine);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_init_workarounds(engine);
	else
		err = 0;
	if (err)
		return err;

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
	return 0;
}

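/*
 * Note the split above: the per-platform *_init_workarounds() hooks only
 * record register/value pairs into dev_priv->workarounds (via the WA_*
 * macros and wa_ring_whitelist_reg()); nothing touches the hardware at
 * that point. The recorded table is replayed from request context by
 * intel_ring_workarounds_emit() below.
 */
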
int intel_ring_workarounds_emit(struct i915_request *rq)
{
	struct i915_workarounds *w = &rq->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, w->count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

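/*
 * Illustrative layout of what the above emits for w->count == 2: a single
 * MI_LOAD_REGISTER_IMM packet of register/value pairs, with a trailing
 * MI_NOOP rounding the packet out to an even number of dwords
 * (w->count * 2 + 2 in total):
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	i915_mmio_reg_offset(w->reg[0].addr)
 *	w->reg[0].value
 *	i915_mmio_reg_offset(w->reg[1].addr)
 *	w->reg[1].value
 *	MI_NOOP
 */
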
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))
		return false;

	/* ELSP is empty, but there are ready requests? */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

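/*
 * Minimal usage sketch: since idleness is a transient property, callers
 * that need to wait for it poll under a timeout rather than trusting a
 * single sample, e.g. (cf. intel_engines_park() below):
 *
 *	if (wait_for(intel_engine_is_idle(engine), 10))
 *		DRM_DEBUG_DRIVER("%s still busy\n", engine->name);
 */
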
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine (or, if the
 * engine is already idle, the last context it executed) is the kernel
 * context (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct i915_gem_context * const kernel_context =
		engine->i915->kernel_context;
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline->last_request);
	if (rq)
		return rq->ctx == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		if (engine->park)
			engine->park(engine);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->unpark)
			engine->unpark(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

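/*
 * Illustrative only: the result is a bitmask over uabi engine classes, so
 * a hypothetical caller would test one class with:
 *
 *	if (intel_engines_has_context_isolation(i915) &
 *	    BIT(I915_ENGINE_CLASS_RENDER))
 *		...
 */
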
static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);

	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" : "",
		   rq->fence.context, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

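/*
 * Example of a line produced by print_request() (values invented; the "!"
 * suffix marks an already-completed request):
 *
 *	E 1c2a! [2b:1c2a] prio=0 @ 40ms: signaled
 */
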
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "%08zx %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

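/*
 * As with hexdump(1), runs of identical 32-byte rows are collapsed into a
 * single "*" line. Invented sample output for a mostly-empty page:
 *
 *	00000000 00000001 00000000 c0001000 00000000 00000000 00000000 00000000 00000000
 *	00000020 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
 *	*
 */
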
static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6)
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));

	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, rq: ",
					 idx, count);
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	struct rb_node *rb;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
		   engine->timeline->inflight_seqnos);
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline->requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline->requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");
		drm_printf(m,
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);
	}

	rcu_read_unlock();

	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry(rq, &engine->timeline->requests, link)
		print_request(m, rq, "\t\tE ");
	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, priotree.link)
			print_request(m, rq, "\t\tQ ");
	}
	spin_unlock_irq(&engine->timeline->lock);

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
		   engine->irq_posted,
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)),
		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}

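/*
 * Typical invocation, as in intel_engines_park() above: build a
 * drm_printer and optionally pass a printf-style header:
 *
 *	struct drm_printer p = drm_debug_printer(__func__);
 *
 *	intel_engine_dump(engine, &p, "%s\n", engine->name);
 */
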
static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

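/*
 * Usage sketch (hypothetical caller): translate a userspace
 * (class, instance) pair, handling the NULL return for engines that do
 * not exist on this device:
 *
 *	struct intel_engine_cs *engine;
 *
 *	engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 0);
 *	if (!engine)
 *		return -ENODEV;
 */
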
/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	tasklet_disable(&execlists->tasklet);
	spin_lock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;
			port++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	spin_unlock_irqrestore(&engine->stats.lock, flags);
	tasklet_enable(&execlists->tasklet);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total;
	unsigned long flags;

	spin_lock_irqsave(&engine->stats.lock, flags);
	total = __intel_engine_get_busy_time(engine);
	spin_unlock_irqrestore(&engine->stats.lock, flags);

	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

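/*
 * Minimal usage sketch for the busyness API above (hypothetical caller;
 * enable/disable calls nest via the engine->stats.enabled refcount):
 *
 *	ktime_t busy;
 *
 *	if (intel_enable_engine_stats(engine) == 0) {
 *		busy = intel_engine_get_busy_time(engine);
 *		...
 *		intel_disable_engine_stats(engine);
 *	}
 */
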
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#endif