/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_uc.h"
#include "i915_guc_submission.h"
#include <linux/firmware.h>

/* Reset GuC providing us with fresh state for both GuC and HuC. */
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 guc_status;

	ret = intel_guc_reset(dev_priv);
	if (ret) {
		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
		return ret;
	}

	guc_status = I915_READ(GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

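/*
 * Clamp the GuC related modparams against the hardware capabilities and the
 * available firmware, so that the rest of the driver can simply test
 * i915_modparams.enable_guc_loading/enable_guc_submission.
 */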
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
{
	if (!HAS_GUC(dev_priv)) {
		if (i915_modparams.enable_guc_loading > 0 ||
		    i915_modparams.enable_guc_submission > 0)
			DRM_INFO("Ignoring GuC options, no hardware\n");

		i915_modparams.enable_guc_loading = 0;
		i915_modparams.enable_guc_submission = 0;
		return;
	}

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc_loading < 0)
		i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);

	/* Verify firmware version */
	if (i915_modparams.enable_guc_loading) {
		if (HAS_HUC_UCODE(dev_priv))
			intel_huc_select_fw(&dev_priv->huc);

		if (intel_guc_select_fw(&dev_priv->guc))
			i915_modparams.enable_guc_loading = 0;
	}

	/* Can't enable guc submission without guc loaded */
	if (!i915_modparams.enable_guc_loading)
		i915_modparams.enable_guc_submission = 0;

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc_submission < 0)
		i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}

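/*
 * Raise the host-to-GuC interrupt so the firmware picks up a freshly written
 * request; installed as guc->notify and invoked via intel_guc_notify().
 */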
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

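/*
 * Software-only GuC initialization: CT channel bookkeeping, the send mutex
 * and the default vfuncs. No hardware is touched at this point, so guc->send
 * stays on the nop implementation until communication is enabled.
 */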
static void guc_init_early(struct intel_guc *guc)
{
	intel_guc_ct_init_early(&guc->ct);

	mutex_init(&guc->send_mutex);
	guc->send = intel_guc_send_nop;
	guc->notify = gen8_guc_raise_irq;
}

void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
	guc_init_early(&dev_priv->guc);
}

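/*
 * Fetch the firmware blobs selected in intel_uc_sanitize_options() so they
 * are ready to be transferred to the hardware in intel_uc_init_hw();
 * intel_uc_fini_fw() releases them again.
 */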
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
	intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
	intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}

void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
	intel_uc_fw_fini(&dev_priv->guc.fw);
	intel_uc_fw_fini(&dev_priv->huc.fw);
}

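/*
 * MMIO based host-to-GuC messaging goes through a bank of SOFT_SCRATCH
 * registers; guc_send_reg() maps a message dword index onto one of them.
 * The last scratch register is left out of send_regs; intel_guc_send_mmio()
 * reads it back as the GuC's response word when reporting errors.
 */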
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

static void guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 *
 * @dev_priv: device private
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
{
	guc_init_send_regs(&dev_priv->guc);
}

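/*
 * Grab an extra reference to the GuC log buffer object after a failed
 * firmware load, so its contents stay around for later inspection even
 * after the load path has been torn down.
 */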
static void guc_capture_load_err_log(struct intel_guc *guc)
{
	if (!guc->log.vma || i915_modparams.guc_log_level < 0)
		return;

	if (!guc->load_err_log)
		guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void guc_free_load_err_log(struct intel_guc *guc)
{
	if (guc->load_err_log)
		i915_gem_object_put(guc->load_err_log);
}

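/*
 * Switch guc->send onto the real backend: the CT buffer based channel when
 * the platform has one, plain MMIO otherwise. guc_disable_communication()
 * reverts to the nop sender so stray requests are caught by the WARN in
 * intel_guc_send_nop().
 */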
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_GUC_CT(dev_priv))
		return intel_guc_enable_ct(guc);

	guc->send = intel_guc_send_mmio;
	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_GUC_CT(dev_priv))
		intel_guc_disable_ct(guc);

	guc->send = intel_guc_send_nop;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset w.r.t. the GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * intel_guc_send() INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This
 * function is invoked by intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

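/*
 * Full GuC/HuC bring-up: reset the GuC, load both firmwares (retrying on
 * Gen9), then enable the communication path and, when requested, GuC
 * submission. On failure the GuC modparams are forced off so the driver
 * falls back to execlist mode.
 */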
int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	int ret, attempts;

	if (!i915_modparams.enable_guc_loading)
		return 0;

	guc_disable_communication(guc);
	gen9_reset_guc_interrupts(dev_priv);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	if (i915_modparams.enable_guc_submission) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = i915_guc_submission_init(dev_priv);
		if (ret)
			goto err_guc;
	}

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
	I915_WRITE(DMA_GUC_WOPCM_OFFSET,
		   GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN9(dev_priv))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __intel_uc_reset_hw(dev_priv);
		if (ret)
			goto err_submission;

		intel_huc_init_hw(&dev_priv->huc);
		ret = intel_guc_init_hw(&dev_priv->guc);
		if (ret == 0 || ret != -EAGAIN)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(&dev_priv->huc);
	if (i915_modparams.enable_guc_submission) {
		if (i915_modparams.guc_log_level >= 0)
			gen9_enable_guc_interrupts(dev_priv);

		ret = i915_guc_submission_enable(dev_priv);
		if (ret)
			goto err_interrupts;
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 *
	 * Decide whether to disable GuC submission and fall back to
	 * execlist mode, and whether to hide the error by returning
	 * zero or to return -EIO, which the caller will treat as a
	 * nonfatal error (i.e. it doesn't prevent driver load, but
	 * marks the GPU as wedged until reset).
	 */
err_interrupts:
	guc_disable_communication(guc);
	gen9_disable_guc_interrupts(dev_priv);
err_log_capture:
	guc_capture_load_err_log(guc);
err_submission:
	if (i915_modparams.enable_guc_submission)
		i915_guc_submission_fini(dev_priv);
err_guc:
	i915_ggtt_disable_guc(dev_priv);

	DRM_ERROR("GuC init failed\n");
	if (i915_modparams.enable_guc_loading > 1 ||
	    i915_modparams.enable_guc_submission > 1)
		ret = -EIO;
	else
		ret = 0;

	if (i915_modparams.enable_guc_submission) {
		i915_modparams.enable_guc_submission = 0;
		DRM_NOTE("Falling back from GuC submission to execlist mode\n");
	}

	i915_modparams.enable_guc_loading = 0;
	DRM_NOTE("GuC firmware loading disabled\n");

	return ret;
}

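/*
 * Undo intel_uc_init_hw(): release the captured error log and tear down
 * submission, communication, GuC interrupts and the GGTT notification hook.
 */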
void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
{
	guc_free_load_err_log(&dev_priv->guc);

	if (!i915_modparams.enable_guc_loading)
		return;

	if (i915_modparams.enable_guc_submission)
		i915_guc_submission_disable(dev_priv);

	guc_disable_communication(&dev_priv->guc);

	if (i915_modparams.enable_guc_submission) {
		gen9_disable_guc_interrupts(dev_priv);
		i915_guc_submission_fini(dev_priv);
	}

	i915_ggtt_disable_guc(dev_priv);
}

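/*
 * Default guc->send implementation, used while communication with the GuC
 * is disabled: any attempt to send a request at that point is a driver bug,
 * hence the WARN.
 */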
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_RECV_MASK,
					   INTEL_GUC_RECV_MASK,
					   10, 10, &status);
	if (status != INTEL_GUC_STATUS_SUCCESS) {
		/*
		 * Either the GuC explicitly returned an error (which
		 * we convert to -EIO here) or no response at all was
		 * received within the timeout limit (-ETIMEDOUT)
		 */
		if (ret != -ETIMEDOUT)
			ret = -EIO;

		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
			 " ret=%d status=0x%08X response=0x%08X\n",
			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
	}

	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

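/*
 * Tell the GuC which forcewake domains (Render and/or Media) it may sample:
 * none when RC6 is disabled or the WaRsDisableCoarsePowerGating workaround
 * applies, both otherwise.
 */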
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,bxt */
	if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}