/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */

#include "intel_guc_fw.h"
#include "i915_drv.h"

#define SKL_FW_MAJOR	6
#define SKL_FW_MINOR	1

#define BXT_FW_MAJOR	8
#define BXT_FW_MINOR	7

#define KBL_FW_MAJOR	9
#define KBL_FW_MINOR	14

#define GLK_FW_MAJOR	10
#define GLK_FW_MINOR	56

#define GUC_FW_PATH(platform, major, minor) \
	"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
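/*
 * For example, GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR) expands to
 * "i915/skl_guc_ver6_1.bin", which request_firmware() looks up relative
 * to the firmware search path (typically /lib/firmware).
 */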

#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
MODULE_FIRMWARE(I915_BXT_GUC_UCODE);

#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);

#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)

/**
 * intel_guc_fw_select() - selects GuC firmware for uploading
 *
 * @guc: intel_guc struct
 *
 * Return: zero if a firmware blob is known for this platform, non-zero otherwise
 */
int intel_guc_fw_select(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);

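	/*
	 * An override supplied via the i915.guc_firmware_path module
	 * parameter (e.g. i915.guc_firmware_path=i915/my_guc.bin, a
	 * hypothetical blob name) takes precedence over the per-platform
	 * defaults below; the wanted versions are zeroed so that no
	 * particular firmware version is required.
	 */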
	if (i915_modparams.guc_firmware_path) {
		guc->fw.path = i915_modparams.guc_firmware_path;
		guc->fw.major_ver_wanted = 0;
		guc->fw.minor_ver_wanted = 0;
	} else if (IS_SKYLAKE(dev_priv)) {
		guc->fw.path = I915_SKL_GUC_UCODE;
		guc->fw.major_ver_wanted = SKL_FW_MAJOR;
		guc->fw.minor_ver_wanted = SKL_FW_MINOR;
	} else if (IS_BROXTON(dev_priv)) {
		guc->fw.path = I915_BXT_GUC_UCODE;
		guc->fw.major_ver_wanted = BXT_FW_MAJOR;
		guc->fw.minor_ver_wanted = BXT_FW_MINOR;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		guc->fw.path = I915_KBL_GUC_UCODE;
		guc->fw.major_ver_wanted = KBL_FW_MAJOR;
		guc->fw.minor_ver_wanted = KBL_FW_MINOR;
	} else if (IS_GEMINILAKE(dev_priv)) {
		guc->fw.path = I915_GLK_GUC_UCODE;
		guc->fw.major_ver_wanted = GLK_FW_MAJOR;
		guc->fw.minor_ver_wanted = GLK_FW_MINOR;
	} else {
		DRM_ERROR("No GuC firmware known for platform with GuC!\n");
		return -ENOENT;
	}

	return 0;
}

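/*
 * Set up GT state required before the firmware transfer: MIA shim control,
 * doorbell enabling, DOP clock gating for the GuC clocks and, where
 * applicable, the BXT clock-gating/RC6 workarounds.
 */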
static void guc_prepare_xfer(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev_priv)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us (in 10ns units) before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}
}

/* Copy RSA signature from the fw image to HW for verification */
static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_uc_fw *guc_fw = &guc->fw;
	struct sg_table *sg = vma->pages;
	u32 rsa[UOS_RSA_SCRATCH_MAX_COUNT];
	int i;

	if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
			       guc_fw->rsa_offset) != sizeof(rsa))
		return -EINVAL;

	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}

/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 */
static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_uc_fw *guc_fw = &guc->fw;
	unsigned long offset;
	u32 status;
	int ret;

	/*
	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
	 * other components
	 */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/* Wait for DMA to finish */
	ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
					   2, 100, &status);
	DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);

	return ret;
}

/*
 * Read the GuC status register (GUC_STATUS) and store it in the
 * specified location; then return a boolean indicating whether
 * the value matches either of two values representing completion
 * of the GuC boot process.
 *
 * This is used for polling the GuC status in a wait_for()
 * loop below.
 */
static inline bool guc_ready(struct intel_guc *guc, u32 *status)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 val = I915_READ(GUC_STATUS);
	u32 uk_val = val & GS_UKERNEL_MASK;

	*status = val;
	return (uk_val == GS_UKERNEL_READY) ||
	       ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
}

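/*
 * Poll guc_ready() until the GuC reports that the uKernel has booted, or
 * until the timeout expires; a bootrom RSA verification failure is
 * reported as -ENOEXEC.
 */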
static int guc_wait_ucode(struct intel_guc *guc)
{
	u32 status;
	int ret;

	/*
	 * Wait for the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
	ret = wait_for(guc_ready(guc, &status), 100);
	DRM_DEBUG_DRIVER("GuC status %#x\n", status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	return ret;
}

/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded via MMIO.
	 */
	ret = guc_xfer_rsa(guc, vma);
	if (ret)
		DRM_WARN("GuC firmware signature xfer error %d\n", ret);

	ret = guc_xfer_ucode(guc, vma);
	if (ret)
		DRM_WARN("GuC firmware code xfer error %d\n", ret);

	ret = guc_wait_ucode(guc);
	if (ret)
		DRM_ERROR("GuC firmware xfer error %d\n", ret);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_guc_fw_upload() - finish preparing the GuC for activity
 * @guc: intel_guc structure
 *
 * Called during driver loading and also after a GPU reset.
 *
 * The main action required here is to load the GuC uCode into the device.
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_init(), so here we need only check that the
 * fetch succeeded, and then transfer the image to the h/w.
 *
 * Return: non-zero code on error
 */
int intel_guc_fw_upload(struct intel_guc *guc)
{
	return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
}