blob: 3b46c1f7b88b3894ff308eea34cf5a480121a128 [file] [log] [blame]
Tvrtko Ursulin88d2ba22016-07-13 16:03:40 +01001/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "i915_drv.h"
26#include "intel_ringbuffer.h"
27#include "intel_lrc.h"
28
/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)

/* Logical ring (execlists) context sizes for the render engine. */
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)

/* Logical ring context size for all non-render engines. */
#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
/*
 * Static, per-engine-class information: the short name used to build the
 * engine's printable name, plus the submission-mode specific initialisers
 * (legacy ringbuffer vs execlists).
 */
struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
};
50
/* Table of per-class data, indexed by the engine class enum. */
static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};
73
/* Static, per-engine-instance description used to populate intel_engine_cs. */
struct engine_info {
	unsigned int hw_id;	/* hardware semaphore/GuC id */
	unsigned int uabi_id;	/* id exposed through the execbuf uAPI */
	u8 class;		/* index into intel_engine_classes[] */
	u8 instance;		/* instance within the class (e.g. vcs0/vcs1) */
	u32 mmio_base;		/* base of this engine's register block */
	unsigned irq_shift;	/* bit shift of this engine's interrupt bits */
};
82
/* Table of per-engine data, indexed by the engine id enum. */
static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		/* Both BSD engines share the same uabi id; selection between
		 * them is made via the I915_EXEC_BSD_* flags. */
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
	},
};
125
/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			/* Unknown gen: warn, then assume the newest known. */
			MISSING_CASE(INTEL_GEN(dev_priv));
			/* fall through */
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			/* gen8 context size depends on the submission mode. */
			return i915.enable_execlists ?
			       GEN8_LR_CONTEXT_RENDER_SIZE :
			       GEN8_CXT_TOTAL_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			/* CXT_SIZE registers report the size in dwords * 16. */
			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through: treat unknown classes like the xcs engines */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		/* Non-render engines only have a context in execlists mode,
		 * which requires gen8+. */
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
188
/*
 * Allocate an intel_engine_cs for the engine described by intel_engines[id]
 * and fill in the software state that does not require touching the hardware.
 * On success the engine is published in dev_priv->engine[id].
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	const struct engine_class_info *class_info;
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
	class_info = &intel_engine_classes[info->class];

	/* Each engine must only be set up once. */
	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	/* Build "<class><instance>", e.g. "rcs0"; warn on truncation. */
	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
			 class_info->name, info->instance) >=
		sizeof(engine->name));
	engine->uabi_id = info->uabi_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;
	engine->class = info->class;
	engine->instance = info->instance;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	/* Sanity check: treat an absurd (>1MiB) context size as "unknown". */
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine[id] = engine;
	return 0;
}
230
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	/* The device must advertise at least one engine, and nothing beyond
	 * the ids the driver knows about. */
	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	/* Only the allocations from intel_engine_setup() need unwinding at
	 * this stage; no hardware state has been touched yet. */
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}
285
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		/* Pick the submission-mode specific initialiser. */
		if (i915.enable_execlists)
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		/* Record the failure point before attempting init, so the
		 * cleanup loop below knows which engines were fully
		 * initialised (id < err_id) and which were not. */
		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			/* Never (fully) initialised: just free it. */
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			/* Fully initialised: use the proper teardown. */
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}
334
/*
 * Reset the engine's breadcrumb/seqno tracking to @seqno. Only legal while
 * the engine is idle with no requests outstanding, since the seqno may move
 * backwards.
 */
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(!intel_engine_is_idle(engine));
	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		/* gen6/7 keep per-engine sync values in registers. */
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		/* gen8+ keeps semaphore values in a page in the GGTT. */
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap_atomic(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap_atomic(semaphores);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}
379
/* Point the engine at its slot in the device-global timeline array. */
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
384
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	/* Empty the execlists scheduling queue. */
	engine->execlist_queue = RB_ROOT;
	engine->execlist_first = NULL;

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}
405
/*
 * Allocate and pin a scratch buffer of @size bytes for the engine (used e.g.
 * for pipe-control writes). Prefers stolen memory, falling back to internal
 * (shmem-less) objects. On success, ownership is stored in engine->scratch.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	/* Must not already have a scratch buffer. */
	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	/* Pin high in the GGTT to keep the low range free for others. */
	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}
441
/* Release the scratch buffer created by intel_engine_create_scratch(). */
static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}
446
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin;

	ret = i915_gem_render_state_init(engine);
	if (ret)
		goto err_unpin;

	return 0;

err_unpin:
	/* Drop the kernel-context pin taken above. */
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}
Chris Wilson96a945a2016-08-03 13:19:16 +0100490
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	i915_gem_render_state_fini(engine);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	/* Drop the kernel-context pin taken in intel_engine_init_common(). */
	engine->context_unpin(engine, engine->i915->kernel_context);
}
Chris Wilson1b365952016-10-04 21:11:31 +0100509
510u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
511{
512 struct drm_i915_private *dev_priv = engine->i915;
513 u64 acthd;
514
515 if (INTEL_GEN(dev_priv) >= 8)
516 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
517 RING_ACTHD_UDW(engine->mmio_base));
518 else if (INTEL_GEN(dev_priv) >= 4)
519 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
520 else
521 acthd = I915_READ(ACTHD);
522
523 return acthd;
524}
525
526u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
527{
528 struct drm_i915_private *dev_priv = engine->i915;
529 u64 bbaddr;
530
531 if (INTEL_GEN(dev_priv) >= 8)
532 bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
533 RING_BBADDR_UDW(engine->mmio_base));
534 else
535 bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
536
537 return bbaddr;
538}
Chris Wilson0e704472016-10-12 10:05:17 +0100539
540const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
541{
542 switch (type) {
543 case I915_CACHE_NONE: return " uncached";
544 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
545 case I915_CACHE_L3_LLC: return " L3+LLC";
546 case I915_CACHE_WT: return " WT";
547 default: return "";
548 }
549}
550
/*
 * Read a per-slice/per-subslice register by steering the MCR selector at the
 * requested slice/subslice, performing the read, and restoring the selector.
 * Takes the forcewake domains for both the target register and the MCR
 * selector under the uncore spinlock so the steering cannot be observed by
 * concurrent readers.
 */
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and sublice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	/* Restore the selector to the expected all-zero steering. */
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}
588
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	/* Zero everything up front: not every gen fills every field. */
	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		/* gen8+ (and anything newer than we know about). */
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		/* Only the render engine has the extra INSTDONE state. */
		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		/* Sampler/row state is per-subslice and needs MCR steering. */
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		/* gen7 has a single slice/subslice. */
		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}
Chris Wilsonf97fbf92017-02-13 17:15:14 +0000643
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000644static int wa_add(struct drm_i915_private *dev_priv,
645 i915_reg_t addr,
646 const u32 mask, const u32 val)
647{
648 const u32 idx = dev_priv->workarounds.count;
649
650 if (WARN_ON(idx >= I915_MAX_WA_REGS))
651 return -ENOSPC;
652
653 dev_priv->workarounds.reg[idx].addr = addr;
654 dev_priv->workarounds.reg[idx].value = val;
655 dev_priv->workarounds.reg[idx].mask = mask;
656
657 dev_priv->workarounds.count++;
658
659 return 0;
660}
661
/*
 * Convenience wrappers around wa_add(). NOTE: these macros return from the
 * enclosing function with the error code if the workaround table overflows,
 * so they may only be used inside the *_init_workarounds() functions.
 */
#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

/* Masked-register helpers: the mask selects which bits the write affects. */
#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

/* Read-modify-write helpers for ordinary (non-masked) registers. */
#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
681
/*
 * Add @reg to the engine's FORCE_TO_NONPRIV whitelist, allowing unprivileged
 * batches to access it. Each engine has a fixed number of whitelist slots;
 * returns -EINVAL (with a WARN) once they are exhausted.
 */
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	/* The whitelist entries themselves are applied via the WA table. */
	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}
698
/* Workarounds common to all gen8 parts (Broadwell and Cherryview). */
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}
749
/* Broadwell-specific workarounds, applied on top of the common gen8 set. */
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}
781
/* Cherryview-specific workarounds, applied on top of the common gen8 set. */
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}
799
800static int gen9_init_workarounds(struct intel_engine_cs *engine)
801{
802 struct drm_i915_private *dev_priv = engine->i915;
803 int ret;
804
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700805 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000806 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
807
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700808 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000809 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
810 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
811
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700812 /* WaDisableKillLogic:bxt,skl,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000813 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
814 ECOCHK_DIS_TLB);
815
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700816 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
817 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000818 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
819 FLOW_CONTROL_ENABLE |
820 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
821
822 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700823 if (!IS_COFFEELAKE(dev_priv))
824 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
825 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000826
827 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
828 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
829 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
830 GEN9_DG_MIRROR_FIX_ENABLE);
831
832 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
833 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
834 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
835 GEN9_RHWO_OPTIMIZATION_DISABLE);
836 /*
837 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
838 * but we do that in per ctx batchbuffer as there is an issue
839 * with this register not getting restored on ctx restore
840 */
841 }
842
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700843 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
844 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000845 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
Arkadiusz Hiler0b71cea2017-05-12 13:20:15 +0200846 GEN9_ENABLE_YV12_BUGFIX |
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000847 GEN9_ENABLE_GPGPU_PREEMPTION);
848
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700849 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
850 /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000851 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
852 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
853
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700854 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000855 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
856 GEN9_CCS_TLB_PREFETCH_ENABLE);
857
858 /* WaDisableMaskBasedCammingInRCC:bxt */
859 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
860 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
861 PIXEL_MASK_CAMMING_DISABLE);
862
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700863 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000864 WA_SET_BIT_MASKED(HDC_CHICKEN0,
865 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
866 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
867
868 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
869 * both tied to WaForceContextSaveRestoreNonCoherent
870 * in some hsds for skl. We keep the tie for all gen9. The
871 * documentation is a bit hazy and so we want to get common behaviour,
872 * even though there is no clear evidence we would need both on kbl/bxt.
873 * This area has been source of system hangs so we play it safe
874 * and mimic the skl regardless of what bspec says.
875 *
876 * Use Force Non-Coherent whenever executing a 3D context. This
877 * is a workaround for a possible hang in the unlikely event
878 * a TLB invalidation occurs during a PSD flush.
879 */
880
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700881 /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000882 WA_SET_BIT_MASKED(HDC_CHICKEN0,
883 HDC_FORCE_NON_COHERENT);
884
885 /* WaDisableHDCInvalidation:skl,bxt,kbl */
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700886 if (!IS_COFFEELAKE(dev_priv))
887 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
888 BDW_DISABLE_HDC_INVALIDATION);
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000889
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700890 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000891 if (IS_SKYLAKE(dev_priv) ||
892 IS_KABYLAKE(dev_priv) ||
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700893 IS_COFFEELAKE(dev_priv) ||
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000894 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
895 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
896 GEN8_SAMPLER_POWER_BYPASS_DIS);
897
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700898 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000899 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
900
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700901 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000902 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
903 GEN8_LQSC_FLUSH_COHERENT_LINES));
904
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700905 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000906 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
907 if (ret)
908 return ret;
909
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700910 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000911 ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
912 if (ret)
913 return ret;
914
Rodrigo Vivi46c26662017-06-16 15:49:58 -0700915 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
Tvrtko Ursulin133b4bd2017-02-16 12:23:23 +0000916 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
917 if (ret)
918 return ret;
919
920 return 0;
921}
922
/*
 * Tune the IZ hashing based on the subslice fusing configuration: for each
 * of the (up to) three slices, if exactly one subslice has 7 EUs, bias the
 * hashing towards it. Records a single masked GEN7_GT_MODE write when any
 * slice needed tuning; otherwise leaves the hardware default.
 *
 * Always returns 0 (WA_SET_FIELD_MASKED records into the workaround list).
 */
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	/* No slice qualified: keep the default hashing, nothing to emit. */
	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}
963
/*
 * Skylake render-engine workarounds: the common gen9 set, then the
 * SKL-specific entries, finishing with the IZ-hashing tuning.
 *
 * Returns 0 on success or a negative error code from gen9 init or
 * whitelisting.
 */
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}
1000
/*
 * Broxton render-engine workarounds: the common gen9 set plus BXT-specific
 * entries, many of which are gated on the hardware revision (pre-production
 * A/B steppings need extra fixes; later steppings need different ones).
 *
 * Returns 0 on success or a negative error code from gen9 init or
 * whitelisting.
 */
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}
1069
1070static int kbl_init_workarounds(struct intel_engine_cs *engine)
1071{
1072 struct drm_i915_private *dev_priv = engine->i915;
1073 int ret;
1074
1075 ret = gen9_init_workarounds(engine);
1076 if (ret)
1077 return ret;
1078
1079 /* WaEnableGapsTsvCreditFix:kbl */
1080 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1081 GEN9_GAPS_TSV_CREDIT_DISABLE));
1082
1083 /* WaDisableDynamicCreditSharing:kbl */
1084 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1085 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1086 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1087
1088 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1089 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1090 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1091 HDC_FENCE_DEST_SLM_DISABLE);
1092
1093 /* WaToEnableHwFixForPushConstHWBug:kbl */
1094 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1095 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1096 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1097
1098 /* WaDisableGafsUnitClkGating:kbl */
1099 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1100
1101 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1102 WA_SET_BIT_MASKED(
1103 GEN7_HALF_SLICE_CHICKEN1,
1104 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1105
1106 /* WaInPlaceDecompressionHang:kbl */
1107 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1108 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1109
1110 /* WaDisableLSQCROPERFforOCL:kbl */
1111 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1112 if (ret)
1113 return ret;
1114
1115 return 0;
1116}
1117
/*
 * Geminilake render-engine workarounds: the common gen9 set plus one
 * GLK-specific masked context-register write.
 *
 * Returns 0 on success or a negative error code from gen9_init_workarounds().
 */
static int glk_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	return 0;
}
1133
/*
 * Coffeelake render-engine workarounds: the common gen9 set plus the
 * CFL-specific entries (none of which are revision-gated).
 *
 * Returns 0 on success or a negative error code from gen9_init_workarounds().
 */
static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:cfl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:cfl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}
1165
/*
 * Reset the per-device workaround list and (re)populate it for @engine by
 * dispatching to the platform-specific init routine. Only the render ring
 * is expected here (hence the WARN_ON); unknown platforms simply record an
 * empty list.
 *
 * Returns 0 on success or a negative error code from the platform routine.
 */
int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

	WARN_ON(engine->id != RCS);

	/* Start from a clean slate; the platform routines append entries. */
	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_init_workarounds(engine);
	else
		err = 0;
	if (err)
		return err;

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
	return 0;
}
1199
/*
 * Emit the recorded context workarounds into @req's ring as a single
 * MI_LOAD_REGISTER_IMM block, bracketed by full flushes so the register
 * writes are not reordered against surrounding commands. No-op when the
 * workaround list is empty.
 *
 * Returns 0 on success or a negative error code from flushing or from
 * ring-space allocation.
 */
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct i915_workarounds *w = &req->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	/* One (reg, value) dword pair per entry, plus LRI header and NOOP. */
	cs = intel_ring_begin(req, (w->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
1232
/*
 * Check whether the engine's ring hardware is idle: no commands left
 * between HEAD and TAIL, and (where available) the CS parser idle bit set.
 * Takes a runtime-pm wakeref for the duration so the register reads are
 * valid. Both checks are performed unconditionally (no short-circuit).
 */
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	intel_runtime_pm_get(dev_priv);

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}
1253
/**
 * intel_engine_is_idle() - Report if the engine has finished process all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	/* Mock engines (selftests) have no hardware state to inspect. */
	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Interrupt/tasklet pending? */
	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
		return false;

	/* Both ports drained, no more ELSP submission? */
	if (port_request(&engine->execlist_port[0]))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}
1291
/*
 * Report whether every engine on the device is idle. Cheap checks first:
 * any outstanding GT requests mean busy, a wedged GPU is reported idle
 * (its hardware state is meaningless), otherwise each engine is polled
 * individually via intel_engine_is_idle().
 */
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (READ_ONCE(dev_priv->gt.active_requests))
		return false;

	/* If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}
1313
/*
 * Restore the default request-submission backend on every engine via each
 * engine's set_default_submission() vfunc (e.g. after a reset or after a
 * temporary submission override).
 */
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}
1322
/*
 * Park all engines when the GPU goes idle: disarm breadcrumb interrupts,
 * release each engine's batch-pool objects and reset the execlist priority
 * hint so the next submission rebuilds it.
 */
void intel_engines_mark_idle(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		intel_engine_disarm_breadcrumbs(engine);
		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->no_priolist = false;
	}
}
1334
Chris Wilsonf97fbf92017-02-13 17:15:14 +00001335#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1336#include "selftests/mock_engine.c"
1337#endif