/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

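/*
 * Per-class engine properties: the printable name prefix shared by all
 * instances of the class, and the submission-mode specific constructors
 * (execlists vs legacy ringbuffer).
 */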
struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};

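/*
 * Per-instance engine properties, indexed by enum intel_engine_id.
 * ->class selects the matching entry in intel_engine_classes[] above,
 * while the remaining fields describe this particular hardware instance.
 */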
struct engine_info {
	unsigned int hw_id;
	unsigned int exec_id;
	u8 class;
	u8 instance;
	u32 mmio_base;
	unsigned irq_shift;
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.exec_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.exec_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.exec_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.exec_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.exec_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
	},
};

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	const struct engine_class_info *class_info;
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
	class_info = &intel_engine_classes[info->class];

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
			 class_info->name, info->instance) >=
		sizeof(engine->name));
	engine->exec_id = info->exec_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;
	engine->class = info->class;
	engine->instance = info->instance;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_early() - allocate the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_early(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	unsigned int mask = 0;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - allocate, populate and init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	unsigned int mask = 0;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (i915.enable_execlists)
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;
		if (!init) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
			continue;
		}

		err = init(engine);
		if (err) {
			err_id = id;
			goto cleanup;
		}

		GEM_BUG_ON(!engine->submit_request);
		mask |= ENGINE_MASK(id);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id)
			kfree(engine);
		else
			dev_priv->gt.cleanup_engine(engine);
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(!intel_engine_is_idle(engine));

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap_atomic(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap_atomic(semaphores);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	engine->execlist_queue = RB_ROOT;
	engine->execlist_first = NULL;

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}

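/*
 * Allocate the engine's scratch page: prefer stolen memory, fall back to
 * an internal object, and pin it high in the global GTT. On the render
 * engine this backs pipe-control writes (hence the debug message below).
 */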
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = engine->context_pin(engine, engine->i915->kernel_context);
	if (ret)
		return ret;

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin;

	ret = i915_gem_render_state_init(engine);
	if (ret)
		goto err_unpin;

	return 0;

err_unpin:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	i915_gem_render_state_fini(engine);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	engine->context_unpin(engine, engine->i915->kernel_context);
}

u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

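/*
 * Read a per-slice/subslice register: steer the MCR selector to the
 * requested slice/subslice under forcewake, sample the register, then
 * restore the default (0/0) steering the hardware expects.
 */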
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
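
/*
 * Each WA_* helper expands to wa_add(), which appends an (addr, mask,
 * value) triplet to dev_priv->workarounds. The accumulated list is
 * replayed from the ring by intel_ring_workarounds_emit() below.
 */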

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
			   L3_HIGH_PRIO_CREDITS(2));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		WA_SET_BIT(GAMT_CHKN_BIT_REG,
			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

static int glk_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else
		err = 0;
	if (err)
		return err;

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
	return 0;
}

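/*
 * Replay the workaround list recorded by init_workarounds_ring() from the
 * ring: a single MI_LOAD_REGISTER_IMM covering every saved register,
 * bracketed by full flushes.
 */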
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct i915_workarounds *w = &req->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, (w->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	/* Interrupt/tasklet pending? */
	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
		return false;

	/* Both ports drained, no more ELSP submission? */
	if (engine->execlist_port[0].request)
		return false;

	/* Ring stopped? */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (READ_ONCE(dev_priv->gt.active_requests))
		return false;

	/* If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif