/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

static const struct engine_info {
	const char *name;
	unsigned exec_id;
	enum intel_engine_hw_id hw_id;
	u32 mmio_base;
	unsigned irq_shift;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
	[RCS] = {
		.name = "render ring",
		.exec_id = I915_EXEC_RENDER,
		.hw_id = RCS_HW,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[BCS] = {
		.name = "blitter ring",
		.exec_id = I915_EXEC_BLT,
		.hw_id = BCS_HW,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VCS] = {
		.name = "bsd ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS_HW,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VCS2] = {
		.name = "bsd2 ring",
		.exec_id = I915_EXEC_BSD,
		.hw_id = VCS2_HW,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd2_ring_buffer,
	},
	[VECS] = {
		.name = "video enhancement ring",
		.exec_id = I915_EXEC_VEBOX,
		.hw_id = VECS_HW,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};
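/*
 * Allocate the per-engine structure for @id and fill in the invariant
 * fields from the intel_engines[] table above. No hardware access is
 * performed here; submission-mode specific setup happens later, in
 * intel_engines_init().
 */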
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	engine->name = info->name;
	engine->exec_id = info->exec_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_early() - allocate the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_early(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	unsigned int mask = 0;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;
	int err;

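	/* Sanity check the static ring mask: it must name at least one
	 * engine and must not set bits at or above I915_NUM_ENGINES.
	 */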
	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines[] table when new
	 * engines are added to the driver: warn and disable the engines
	 * that were forgotten.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	unsigned int mask = 0;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		int (*init)(struct intel_engine_cs *engine);

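		/*
		 * Pick the init hook matching the chosen submission mode;
		 * engines without one are freed and dropped from the
		 * engine array.
		 */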
		if (i915.enable_execlists)
			init = intel_engines[id].init_execlists;
		else
			init = intel_engines[id].init_legacy;
		if (!init) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
			continue;
		}

		err = init(engine);
		if (err) {
			err_id = id;
			goto cleanup;
		}

		mask |= ENGINE_MASK(id);
	}

	/*
	 * Catch failures to update the intel_engines[] table when new
	 * engines are added to the driver: warn and disable the engines
	 * that were forgotten.
	 */
	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
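	/*
	 * The engine that failed and any engines after it were only
	 * allocated, so a plain kfree() suffices; engines before err_id
	 * completed init() and need the full submission-mode cleanup.
	 */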
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id)
			kfree(engine);
		else if (i915.enable_execlists)
			intel_logical_ring_cleanup(engine);
		else
			intel_engine_cleanup(engine);
	}
	return err;
}

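/*
 * Reset the seqno bookkeeping of @engine (semaphores, hardware status
 * page, hangcheck and breadcrumb waiters) so that everything restarts
 * from @seqno.
 */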
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno we must also
	 * reset the tracked semaphore value to 0 so that it is always before
	 * the next request's seqno. If we did not reset the semaphore value,
	 * then once the seqno moves backwards all future waits would complete
	 * instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}

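	/* Publish the new seqno to the status page and software trackers. */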
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
	engine->timeline->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);
}

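/* Each engine owns a slice of the single device-wide global timeline. */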
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	engine->execlist_queue = RB_ROOT;
	engine->execlist_first = NULL;

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

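	/*
	 * Prefer stolen memory for the scratch page; fall back to an
	 * internal object when no stolen memory can be allocated.
	 */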
	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	int ret;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = engine->context_pin(engine, engine->i915->kernel_context);
	if (ret)
		return ret;

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin;

	ret = i915_gem_render_state_init(engine);
	if (ret)
		goto err_unpin;

	return 0;

err_unpin:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	i915_gem_render_state_fini(engine);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	engine->context_unpin(engine, engine->i915->kernel_context);
}

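/* Read the engine's current execution head (ACTHD); the register width
 * and location vary with hardware generation.
 */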
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

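/*
 * Read a per-slice/subslice register by steering GEN8_MCR_SELECTOR to the
 * requested slice and subslice. The uncore lock and forcewake are held
 * across the read so the steering cannot change underneath us, and the
 * selector is restored to 0 afterwards as the hardware expects.
 */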
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: instdone is zeroed first, so any field not filled in for the
 * running generation reads back as zero.
 */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}