/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)


static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev_priv)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		ret = PCH_CNP;
		DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
	}

	return ret;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev_priv));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found CannonPoint PCH\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
				   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type =
					intel_virt_detect_pch(dev_priv);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

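/*
 * Handler for the I915_GETPARAM ioctl: reports driver and hardware
 * capability values (engine presence, feature flags, sseu topology, ...)
 * back to userspace one parameter at a time.
 */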
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915.semaphores;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		intel_runtime_pm_get(dev_priv);
		value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
		intel_runtime_pm_put(dev_priv);
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->engine[RCS] &&
			dev_priv->engine[RCS]->schedule;
		break;
	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_SLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		if (!value)
			return -ENODEV;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

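/*
 * Grab a reference to the PCI host bridge (device 0, function 0); it is
 * needed later for MCHBAR setup via config space accesses.
 */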
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; remember in mchbar_need_disable if we must undo it on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

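/* Undo the GEM setup performed during driver load. */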
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	flush_workqueue(dev_priv->wq);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	i915_gem_cleanup_engines(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_bios_init(dev_priv);

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_uc_init_fw(dev_priv);

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_uc:
	intel_uc_fini_fw(dev_priv);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	intel_uc_init_early(dev_priv);
	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	ret = i915_gem_load_init(dev_priv);
	if (ret < 0)
		goto err_irq;

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	i915_perf_init(dev_priv);

	return 0;

err_irq:
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_perf_fini(dev_priv);
	i915_gem_load_cleanup(dev_priv);
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

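/*
 * Map the MMIO register BAR and make sure MCHBAR is enabled before the
 * uncore and engines start poking at registers.
 */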
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

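/*
 * Clamp the user-supplied module parameters (execlists, ppgtt, semaphores,
 * uC and GVT options) to what this device and driver build actually support.
 */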
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));

	intel_uc_sanitize_options(dev_priv);

	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto out_ggtt;

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_guc_log_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1233 * @dev_priv: device private
1234 */
1235static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1236{
Jerome Anandeef57322017-01-25 04:27:49 +05301237 intel_audio_deinit(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001238
1239 intel_gpu_ips_teardown();
1240 acpi_video_unregister();
1241 intel_opregion_unregister(dev_priv);
1242
Robert Bragg442b8c02016-11-07 19:49:53 +00001243 i915_perf_unregister(dev_priv);
1244
David Weinehall694c2822016-08-22 13:32:43 +03001245 i915_teardown_sysfs(dev_priv);
Michal Wajdeczkof9cda042017-01-13 17:41:57 +00001246 i915_guc_log_unregister(dev_priv);
Chris Wilson91c8a322016-07-05 10:40:23 +01001247 drm_dev_unregister(&dev_priv->drm);
Chris Wilson0673ad42016-06-24 14:00:22 +01001248
1249 i915_gem_shrinker_cleanup(dev_priv);
1250}
1251
1252/**
1253 * i915_driver_load - setup chip and create an initial config
Joonas Lahtinend2ad3ae2016-11-10 15:36:34 +02001254 * @pdev: PCI device
1255 * @ent: matching PCI ID entry
Chris Wilson0673ad42016-06-24 14:00:22 +01001256 *
1257 * The driver load routine has to do several things:
1258 * - drive output discovery via intel_modeset_init()
1259 * - initialize the memory manager
1260 * - allocate initial config memory
1261 * - setup the DRM framebuffer with the allocated memory
1262 */
Chris Wilson42f55512016-06-24 14:00:26 +01001263int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
Chris Wilson0673ad42016-06-24 14:00:22 +01001264{
Maarten Lankhorst8d2b47d2017-02-02 08:41:42 +01001265 const struct intel_device_info *match_info =
1266 (struct intel_device_info *)ent->driver_data;
Chris Wilson0673ad42016-06-24 14:00:22 +01001267 struct drm_i915_private *dev_priv;
1268 int ret;
1269
Ville Syrjäläff4c3b72017-03-03 17:19:28 +02001270 /* Enable nuclear pageflip on ILK+ */
1271 if (!i915.nuclear_pageflip && match_info->gen < 5)
Maarten Lankhorst8d2b47d2017-02-02 08:41:42 +01001272 driver.driver_features &= ~DRIVER_ATOMIC;
Chris Wilsona09d0ba2016-06-24 14:00:27 +01001273
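	/*
	 * Allocate the driver private and bind it to a new DRM device;
	 * a failure in either step takes the common error path below.
	 */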
Chris Wilson0673ad42016-06-24 14:00:22 +01001274 ret = -ENOMEM;
1275 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1276 if (dev_priv)
1277 ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
1278 if (ret) {
Tvrtko Ursulin87a67522016-12-06 19:04:13 +00001279 DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
Chris Wilsoncad36882017-02-10 16:35:21 +00001280 goto out_free;
Chris Wilson0673ad42016-06-24 14:00:22 +01001281 }
1282
Chris Wilson0673ad42016-06-24 14:00:22 +01001283 dev_priv->drm.pdev = pdev;
1284 dev_priv->drm.dev_private = dev_priv;
Chris Wilson0673ad42016-06-24 14:00:22 +01001285
1286 ret = pci_enable_device(pdev);
1287 if (ret)
Chris Wilsoncad36882017-02-10 16:35:21 +00001288 goto out_fini;
Chris Wilson0673ad42016-06-24 14:00:22 +01001289
1290 pci_set_drvdata(pdev, &dev_priv->drm);
Imre Deakadfdf852017-05-02 15:04:09 +03001291 /*
1292 * Disable the system suspend direct complete optimization, which can
1293	 * leave the device suspended, skipping the driver's suspend handlers
1294 * if the device was already runtime suspended. This is needed due to
1295 * the difference in our runtime and system suspend sequence and
1296	 * because the HDA driver may require us to enable the audio power
1297 * domain during system suspend.
1298 */
1299 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
Chris Wilson0673ad42016-06-24 14:00:22 +01001300
1301 ret = i915_driver_init_early(dev_priv, ent);
1302 if (ret < 0)
1303 goto out_pci_disable;
1304
1305 intel_runtime_pm_get(dev_priv);
1306
1307 ret = i915_driver_init_mmio(dev_priv);
1308 if (ret < 0)
1309 goto out_runtime_pm_put;
1310
1311 ret = i915_driver_init_hw(dev_priv);
1312 if (ret < 0)
1313 goto out_cleanup_mmio;
1314
1315 /*
1316 * TODO: move the vblank init and parts of modeset init steps into one
1317 * of the i915_driver_init_/i915_driver_register functions according
1318 * to the role/effect of the given init step.
1319 */
1320 if (INTEL_INFO(dev_priv)->num_pipes) {
Chris Wilson91c8a322016-07-05 10:40:23 +01001321 ret = drm_vblank_init(&dev_priv->drm,
Chris Wilson0673ad42016-06-24 14:00:22 +01001322 INTEL_INFO(dev_priv)->num_pipes);
1323 if (ret)
1324 goto out_cleanup_hw;
1325 }
1326
Chris Wilson91c8a322016-07-05 10:40:23 +01001327 ret = i915_load_modeset_init(&dev_priv->drm);
Chris Wilson0673ad42016-06-24 14:00:22 +01001328 if (ret < 0)
1329 goto out_cleanup_vblank;
1330
1331 i915_driver_register(dev_priv);
1332
1333 intel_runtime_pm_enable(dev_priv);
1334
Mahesh Kumara3a89862016-12-01 21:19:34 +05301335 dev_priv->ipc_enabled = false;
1336
Chris Wilson0525a062016-10-14 14:27:07 +01001337 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1338 DRM_INFO("DRM_I915_DEBUG enabled\n");
1339 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1340 DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
Chris Wilsonbc5ca472016-08-25 08:23:14 +01001341
Chris Wilson0673ad42016-06-24 14:00:22 +01001342 intel_runtime_pm_put(dev_priv);
1343
1344 return 0;
1345
1346out_cleanup_vblank:
Chris Wilson91c8a322016-07-05 10:40:23 +01001347 drm_vblank_cleanup(&dev_priv->drm);
Chris Wilson0673ad42016-06-24 14:00:22 +01001348out_cleanup_hw:
1349 i915_driver_cleanup_hw(dev_priv);
1350out_cleanup_mmio:
1351 i915_driver_cleanup_mmio(dev_priv);
1352out_runtime_pm_put:
1353 intel_runtime_pm_put(dev_priv);
1354 i915_driver_cleanup_early(dev_priv);
1355out_pci_disable:
1356 pci_disable_device(pdev);
Chris Wilsoncad36882017-02-10 16:35:21 +00001357out_fini:
Chris Wilson0673ad42016-06-24 14:00:22 +01001358 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
Chris Wilsoncad36882017-02-10 16:35:21 +00001359 drm_dev_fini(&dev_priv->drm);
1360out_free:
1361 kfree(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001362 return ret;
1363}
1364
Chris Wilson42f55512016-06-24 14:00:26 +01001365void i915_driver_unload(struct drm_device *dev)
Chris Wilson0673ad42016-06-24 14:00:22 +01001366{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001367 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001368 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson0673ad42016-06-24 14:00:22 +01001369
1370 intel_fbdev_fini(dev);
1371
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001372 if (i915_gem_suspend(dev_priv))
Chris Wilson42f55512016-06-24 14:00:26 +01001373 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
Chris Wilson0673ad42016-06-24 14:00:22 +01001374
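	/*
	 * Keep all power wells enabled while tearing down; the matching
	 * intel_display_power_put() happens at the end of unload.
	 */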
1375 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1376
Daniel Vetter18dddad2017-03-21 17:41:49 +01001377 drm_atomic_helper_shutdown(dev);
Maarten Lankhorsta667fb42016-12-15 15:29:44 +01001378
Zhenyu Wang26f837e2017-01-13 10:46:09 +08001379 intel_gvt_cleanup(dev_priv);
1380
Chris Wilson0673ad42016-06-24 14:00:22 +01001381 i915_driver_unregister(dev_priv);
1382
1383 drm_vblank_cleanup(dev);
1384
1385 intel_modeset_cleanup(dev);
1386
1387 /*
1388 * free the memory space allocated for the child device
1389 * config parsed from VBT
1390 */
1391 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1392 kfree(dev_priv->vbt.child_dev);
1393 dev_priv->vbt.child_dev = NULL;
1394 dev_priv->vbt.child_dev_num = 0;
1395 }
1396 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1397 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1398 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1399 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1400
David Weinehall52a05c32016-08-22 13:32:44 +03001401 vga_switcheroo_unregister_client(pdev);
1402 vga_client_register(pdev, NULL, NULL, NULL);
Chris Wilson0673ad42016-06-24 14:00:22 +01001403
1404 intel_csr_ucode_fini(dev_priv);
1405
1406 /* Free error state after interrupts are fully disabled. */
1407 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001408 i915_reset_error_state(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001409
1410 /* Flush any outstanding unpin_work. */
Chris Wilsonb7137e02016-07-13 09:10:37 +01001411 drain_workqueue(dev_priv->wq);
Chris Wilson0673ad42016-06-24 14:00:22 +01001412
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01001413 i915_gem_fini(dev_priv);
Oscar Mateo3950bf32017-03-22 10:39:46 -07001414 intel_uc_fini_fw(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001415 intel_fbc_cleanup_cfb(dev_priv);
1416
1417 intel_power_domains_fini(dev_priv);
1418
1419 i915_driver_cleanup_hw(dev_priv);
1420 i915_driver_cleanup_mmio(dev_priv);
1421
1422 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
Chris Wilsoncad36882017-02-10 16:35:21 +00001423}
1424
1425static void i915_driver_release(struct drm_device *dev)
1426{
1427 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001428
1429 i915_driver_cleanup_early(dev_priv);
Chris Wilsoncad36882017-02-10 16:35:21 +00001430 drm_dev_fini(&dev_priv->drm);
1431
1432 kfree(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001433}
1434
1435static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1436{
Chris Wilson829a0af2017-06-20 12:05:45 +01001437 struct drm_i915_private *i915 = to_i915(dev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001438 int ret;
1439
Chris Wilson829a0af2017-06-20 12:05:45 +01001440 ret = i915_gem_open(i915, file);
Chris Wilson0673ad42016-06-24 14:00:22 +01001441 if (ret)
1442 return ret;
1443
1444 return 0;
1445}
1446
1447/**
1448 * i915_driver_lastclose - clean up after all DRM clients have exited
1449 * @dev: DRM device
1450 *
1451 * Take care of cleaning up after all DRM clients have exited. In the
1452 * mode setting case, we want to restore the kernel's initial mode (just
1453 * in case the last client left us in a bad state).
1454 *
1455 * Additionally, in the non-mode setting case, we'll tear down the GTT
1456 * and DMA structures, since the kernel won't be using them, and clean
1457 * up any GEM state.
1458 */
1459static void i915_driver_lastclose(struct drm_device *dev)
1460{
1461 intel_fbdev_restore_mode(dev);
1462 vga_switcheroo_process_delayed_switch();
1463}
1464
Daniel Vetter7d2ec882017-03-08 15:12:45 +01001465static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
Chris Wilson0673ad42016-06-24 14:00:22 +01001466{
Daniel Vetter7d2ec882017-03-08 15:12:45 +01001467 struct drm_i915_file_private *file_priv = file->driver_priv;
1468
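	/* Release the file's GEM context and per-file GEM state under struct_mutex. */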
Chris Wilson0673ad42016-06-24 14:00:22 +01001469 mutex_lock(&dev->struct_mutex);
Chris Wilson829a0af2017-06-20 12:05:45 +01001470 i915_gem_context_close(file);
Chris Wilson0673ad42016-06-24 14:00:22 +01001471 i915_gem_release(dev, file);
1472 mutex_unlock(&dev->struct_mutex);
Chris Wilson0673ad42016-06-24 14:00:22 +01001473
1474 kfree(file_priv);
1475}
1476
Imre Deak07f9cd02014-08-18 14:42:45 +03001477static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1478{
Chris Wilson91c8a322016-07-05 10:40:23 +01001479 struct drm_device *dev = &dev_priv->drm;
Jani Nikula19c80542015-12-16 12:48:16 +02001480 struct intel_encoder *encoder;
Imre Deak07f9cd02014-08-18 14:42:45 +03001481
1482 drm_modeset_lock_all(dev);
Jani Nikula19c80542015-12-16 12:48:16 +02001483 for_each_intel_encoder(dev, encoder)
1484 if (encoder->suspend)
1485 encoder->suspend(encoder);
Imre Deak07f9cd02014-08-18 14:42:45 +03001486 drm_modeset_unlock_all(dev);
1487}
1488
Paulo Zanoni1a5df182014-10-27 17:54:32 -02001489static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1490 bool rpm_resume);
Imre Deak507e1262016-04-20 20:27:54 +03001491static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
Suketu Shahf75a1982015-04-16 14:22:11 +05301492
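/*
 * An ACPI target state shallower than S3 means we are suspending to idle
 * rather than performing a full suspend-to-RAM.
 */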
Imre Deakbc872292015-11-18 17:32:30 +02001493static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1494{
1495#if IS_ENABLED(CONFIG_ACPI_SLEEP)
1496 if (acpi_target_system_state() < ACPI_STATE_S3)
1497 return true;
1498#endif
1499 return false;
1500}
Sagar Kambleebc32822014-08-13 23:07:05 +05301501
Imre Deak5e365c32014-10-23 19:23:25 +03001502static int i915_drm_suspend(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001503{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001504 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001505 struct pci_dev *pdev = dev_priv->drm.pdev;
Jesse Barnese5747e32014-06-12 08:35:47 -07001506 pci_power_t opregion_target_state;
Daniel Vetterd5818932015-02-23 12:03:26 +01001507 int error;
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001508
Zhang Ruib8efb172013-02-05 15:41:53 +08001509 /* ignore lid events during suspend */
1510 mutex_lock(&dev_priv->modeset_restore_lock);
1511 dev_priv->modeset_restore = MODESET_SUSPENDED;
1512 mutex_unlock(&dev_priv->modeset_restore_lock);
1513
Imre Deak1f814da2015-12-16 02:52:19 +02001514 disable_rpm_wakeref_asserts(dev_priv);
1515
Paulo Zanonic67a4702013-08-19 13:18:09 -03001516	/* We do a lot of poking in a lot of registers; make sure they work
1517 * properly. */
Imre Deakda7e29b2014-02-18 00:02:02 +02001518 intel_display_set_init_power(dev_priv, true);
Paulo Zanonicb107992013-01-25 16:59:15 -02001519
Dave Airlie5bcf7192010-12-07 09:20:40 +10001520 drm_kms_helper_poll_disable(dev);
1521
David Weinehall52a05c32016-08-22 13:32:44 +03001522 pci_save_state(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001523
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001524 error = i915_gem_suspend(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001525 if (error) {
David Weinehall52a05c32016-08-22 13:32:44 +03001526 dev_err(&pdev->dev,
Daniel Vetterd5818932015-02-23 12:03:26 +01001527 "GEM idle failed, resume might fail\n");
Imre Deak1f814da2015-12-16 02:52:19 +02001528 goto out;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001529 }
1530
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02001531 intel_display_suspend(dev);
Daniel Vetterd5818932015-02-23 12:03:26 +01001532
1533 intel_dp_mst_suspend(dev);
1534
1535 intel_runtime_pm_disable_interrupts(dev_priv);
1536 intel_hpd_cancel_work(dev_priv);
1537
1538 intel_suspend_encoders(dev_priv);
1539
Ville Syrjälä712bf362016-10-31 22:37:23 +02001540 intel_suspend_hw(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001541
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001542 i915_gem_suspend_gtt_mappings(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07001543
Tvrtko Ursulinaf6dc742016-12-01 14:16:44 +00001544 i915_save_state(dev_priv);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001545
Imre Deakbc872292015-11-18 17:32:30 +02001546 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001547 intel_opregion_notify_adapter(dev_priv, opregion_target_state);
Jesse Barnese5747e32014-06-12 08:35:47 -07001548
Hans de Goede68f60942017-02-10 11:28:01 +01001549 intel_uncore_suspend(dev_priv);
Chris Wilson03d92e42016-05-23 15:08:10 +01001550 intel_opregion_unregister(dev_priv);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001551
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001552 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
Dave Airlie3fa016a2012-03-28 10:48:49 +01001553
Mika Kuoppala62d5d692014-02-25 17:11:28 +02001554 dev_priv->suspend_count++;
1555
Imre Deakf74ed082016-04-18 14:48:21 +03001556 intel_csr_ucode_suspend(dev_priv);
Imre Deakf514c2d2015-10-28 23:59:06 +02001557
Imre Deak1f814da2015-12-16 02:52:19 +02001558out:
1559 enable_rpm_wakeref_asserts(dev_priv);
1560
1561 return error;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001562}
1563
David Weinehallc49d13e2016-08-22 13:32:42 +03001564static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
Imre Deakc3c09c92014-10-23 19:23:15 +03001565{
David Weinehallc49d13e2016-08-22 13:32:42 +03001566 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001567 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deakbc872292015-11-18 17:32:30 +02001568 bool fw_csr;
Imre Deakc3c09c92014-10-23 19:23:15 +03001569 int ret;
1570
Imre Deak1f814da2015-12-16 02:52:19 +02001571 disable_rpm_wakeref_asserts(dev_priv);
1572
Imre Deak4c494a52016-10-13 14:34:06 +03001573 intel_display_set_init_power(dev_priv, false);
1574
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02001575 fw_csr = !IS_GEN9_LP(dev_priv) &&
Imre Deaka7c81252016-04-01 16:02:38 +03001576 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
Imre Deakbc872292015-11-18 17:32:30 +02001577 /*
1578 * In case of firmware assisted context save/restore don't manually
1579 * deinit the power domains. This also means the CSR/DMC firmware will
1580 * stay active, it will power down any HW resources as required and
1581 * also enable deeper system power states that would be blocked if the
1582 * firmware was inactive.
1583 */
1584 if (!fw_csr)
1585 intel_power_domains_suspend(dev_priv);
Imre Deak73dfc222015-11-17 17:33:53 +02001586
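	/*
	 * Enter the platform specific low power state: DC9 on GEN9 LP,
	 * PC8 on HSW/BDW, S0ix preparation on VLV/CHV.
	 */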
Imre Deak507e1262016-04-20 20:27:54 +03001587 ret = 0;
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02001588 if (IS_GEN9_LP(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03001589 bxt_enable_dc9(dev_priv);
Imre Deakb8aea3d12016-04-20 20:27:55 +03001590 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03001591 hsw_enable_pc8(dev_priv);
1592 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1593 ret = vlv_suspend_complete(dev_priv);
Imre Deakc3c09c92014-10-23 19:23:15 +03001594
1595 if (ret) {
1596 DRM_ERROR("Suspend complete failed: %d\n", ret);
Imre Deakbc872292015-11-18 17:32:30 +02001597 if (!fw_csr)
1598 intel_power_domains_init_hw(dev_priv, true);
Imre Deakc3c09c92014-10-23 19:23:15 +03001599
Imre Deak1f814da2015-12-16 02:52:19 +02001600 goto out;
Imre Deakc3c09c92014-10-23 19:23:15 +03001601 }
1602
David Weinehall52a05c32016-08-22 13:32:44 +03001603 pci_disable_device(pdev);
Imre Deakab3be732015-03-02 13:04:41 +02001604 /*
Imre Deak54875572015-06-30 17:06:47 +03001605 * During hibernation on some platforms the BIOS may try to access
Imre Deakab3be732015-03-02 13:04:41 +02001606 * the device even though it's already in D3 and hang the machine. So
1607 * leave the device in D0 on those platforms and hope the BIOS will
Imre Deak54875572015-06-30 17:06:47 +03001608 * power down the device properly. The issue was seen on multiple old
1609 * GENs with different BIOS vendors, so having an explicit blacklist
1610	 * is impractical; apply the workaround on everything pre GEN6. The
1611 * platforms where the issue was seen:
1612 * Lenovo Thinkpad X301, X61s, X60, T60, X41
1613 * Fujitsu FSC S7110
1614 * Acer Aspire 1830T
Imre Deakab3be732015-03-02 13:04:41 +02001615 */
Tvrtko Ursulin514e1d62016-11-04 14:42:48 +00001616 if (!(hibernation && INTEL_GEN(dev_priv) < 6))
David Weinehall52a05c32016-08-22 13:32:44 +03001617 pci_set_power_state(pdev, PCI_D3hot);
Imre Deakc3c09c92014-10-23 19:23:15 +03001618
Imre Deakbc872292015-11-18 17:32:30 +02001619 dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
1620
Imre Deak1f814da2015-12-16 02:52:19 +02001621out:
1622 enable_rpm_wakeref_asserts(dev_priv);
1623
1624 return ret;
Imre Deakc3c09c92014-10-23 19:23:15 +03001625}
1626
Matthew Aulda9a251c2016-12-02 10:24:11 +00001627static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001628{
1629 int error;
1630
Chris Wilsonded8b072016-07-05 10:40:22 +01001631 if (!dev) {
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001632 DRM_ERROR("dev: %p\n", dev);
Keith Packard1ae8c0a2009-06-28 15:42:17 -07001633 DRM_ERROR("DRM not initialized, aborting suspend.\n");
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001634 return -ENODEV;
1635 }
1636
Imre Deak0b14cbd2014-09-10 18:16:55 +03001637 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
1638 state.event != PM_EVENT_FREEZE))
1639 return -EINVAL;
Dave Airlie5bcf7192010-12-07 09:20:40 +10001640
1641 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1642 return 0;
Chris Wilson6eecba32010-09-08 09:45:11 +01001643
Imre Deak5e365c32014-10-23 19:23:25 +03001644 error = i915_drm_suspend(dev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001645 if (error)
1646 return error;
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001647
Imre Deakab3be732015-03-02 13:04:41 +02001648 return i915_drm_suspend_late(dev, false);
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001649}
1650
Imre Deak5e365c32014-10-23 19:23:25 +03001651static int i915_drm_resume(struct drm_device *dev)
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001652{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001653 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03001654 int ret;
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +01001655
Imre Deak1f814da2015-12-16 02:52:19 +02001656 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilsonabc80ab2016-08-24 10:27:01 +01001657 intel_sanitize_gt_powersave(dev_priv);
Imre Deak1f814da2015-12-16 02:52:19 +02001658
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001659 ret = i915_ggtt_enable_hw(dev_priv);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03001660 if (ret)
1661 DRM_ERROR("failed to re-enable GGTT\n");
1662
Imre Deakf74ed082016-04-18 14:48:21 +03001663 intel_csr_ucode_resume(dev_priv);
1664
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001665 i915_gem_resume(dev_priv);
Paulo Zanoni9d49c0e2013-09-12 18:06:43 -03001666
Tvrtko Ursulinaf6dc742016-12-01 14:16:44 +00001667 i915_restore_state(dev_priv);
Imre Deak8090ba82016-08-10 14:07:33 +03001668 intel_pps_unlock_regs_wa(dev_priv);
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001669 intel_opregion_setup(dev_priv);
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001670
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02001671 intel_init_pch_refclk(dev_priv);
Chris Wilson1833b132012-05-09 11:56:28 +01001672
Peter Antoine364aece2015-05-11 08:50:45 +01001673 /*
1674 * Interrupts have to be enabled before any batches are run. If not the
1675 * GPU will hang. i915_gem_init_hw() will initiate batches to
1676 * update/restore the context.
1677 *
Imre Deak908764f2016-11-29 21:40:29 +02001678 * drm_mode_config_reset() needs AUX interrupts.
1679 *
Peter Antoine364aece2015-05-11 08:50:45 +01001680 * Modeset enabling in intel_modeset_init_hw() also needs working
1681 * interrupts.
1682 */
1683 intel_runtime_pm_enable_interrupts(dev_priv);
1684
Imre Deak908764f2016-11-29 21:40:29 +02001685 drm_mode_config_reset(dev);
1686
Daniel Vetterd5818932015-02-23 12:03:26 +01001687 mutex_lock(&dev->struct_mutex);
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001688 if (i915_gem_init_hw(dev_priv)) {
Daniel Vetterd5818932015-02-23 12:03:26 +01001689 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
Chris Wilson821ed7d2016-09-09 14:11:53 +01001690 i915_gem_set_wedged(dev_priv);
Jesse Barnesd5bb0812011-01-05 12:01:26 -08001691 }
Daniel Vetterd5818932015-02-23 12:03:26 +01001692 mutex_unlock(&dev->struct_mutex);
1693
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001694 intel_guc_resume(dev_priv);
Alex Daia1c41992015-09-30 09:46:37 -07001695
Daniel Vetterd5818932015-02-23 12:03:26 +01001696 intel_modeset_init_hw(dev);
1697
1698 spin_lock_irq(&dev_priv->irq_lock);
1699 if (dev_priv->display.hpd_irq_setup)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001700 dev_priv->display.hpd_irq_setup(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001701 spin_unlock_irq(&dev_priv->irq_lock);
1702
Daniel Vetterd5818932015-02-23 12:03:26 +01001703 intel_dp_mst_resume(dev);
1704
Lyudea16b7652016-03-11 10:57:01 -05001705 intel_display_resume(dev);
1706
Lyudee0b70062016-11-01 21:06:30 -04001707 drm_kms_helper_poll_enable(dev);
1708
Daniel Vetterd5818932015-02-23 12:03:26 +01001709 /*
1710 * ... but also need to make sure that hotplug processing
1711 * doesn't cause havoc. Like in the driver load code we don't
1712	 * bother with the tiny race here where we might lose hotplug
1713 * notifications.
1714	 */
1715 intel_hpd_init(dev_priv);
Jesse Barnes1daed3f2011-01-05 12:01:25 -08001716
Chris Wilson03d92e42016-05-23 15:08:10 +01001717 intel_opregion_register(dev_priv);
Chris Wilson44834a62010-08-19 16:09:23 +01001718
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001719 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
Jesse Barnes073f34d2012-11-02 11:13:59 -07001720
Zhang Ruib8efb172013-02-05 15:41:53 +08001721 mutex_lock(&dev_priv->modeset_restore_lock);
1722 dev_priv->modeset_restore = MODESET_DONE;
1723 mutex_unlock(&dev_priv->modeset_restore_lock);
Paulo Zanoni8a187452013-12-06 20:32:13 -02001724
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001725 intel_opregion_notify_adapter(dev_priv, PCI_D0);
Jesse Barnese5747e32014-06-12 08:35:47 -07001726
Chris Wilson54b4f682016-07-21 21:16:19 +01001727 intel_autoenable_gt_powersave(dev_priv);
Imre Deakee6f2802014-10-23 19:23:22 +03001728
Imre Deak1f814da2015-12-16 02:52:19 +02001729 enable_rpm_wakeref_asserts(dev_priv);
1730
Chris Wilson074c6ad2014-04-09 09:19:43 +01001731 return 0;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001732}
1733
Imre Deak5e365c32014-10-23 19:23:25 +03001734static int i915_drm_resume_early(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001735{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001736 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001737 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deak44410cd2016-04-18 14:45:54 +03001738 int ret;
Imre Deak36d61e62014-10-23 19:23:24 +03001739
Imre Deak76c4b252014-04-01 19:55:22 +03001740 /*
1741 * We have a resume ordering issue with the snd-hda driver also
1742	 * requiring our device to be powered up. Due to the lack of a
1743 * parent/child relationship we currently solve this with an early
1744 * resume hook.
1745 *
1746 * FIXME: This should be solved with a special hdmi sink device or
1747 * similar so that power domains can be employed.
1748 */
Imre Deak44410cd2016-04-18 14:45:54 +03001749
1750 /*
1751 * Note that we need to set the power state explicitly, since we
1752 * powered off the device during freeze and the PCI core won't power
1753 * it back up for us during thaw. Powering off the device during
1754 * freeze is not a hard requirement though, and during the
1755 * suspend/resume phases the PCI core makes sure we get here with the
1756 * device powered on. So in case we change our freeze logic and keep
1757 * the device powered we can also remove the following set power state
1758 * call.
1759 */
David Weinehall52a05c32016-08-22 13:32:44 +03001760 ret = pci_set_power_state(pdev, PCI_D0);
Imre Deak44410cd2016-04-18 14:45:54 +03001761 if (ret) {
1762 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
1763 goto out;
1764 }
1765
1766 /*
1767 * Note that pci_enable_device() first enables any parent bridge
1768 * device and only then sets the power state for this device. The
1769 * bridge enabling is a nop though, since bridge devices are resumed
1770 * first. The order of enabling power and enabling the device is
1771 * imposed by the PCI core as described above, so here we preserve the
1772 * same order for the freeze/thaw phases.
1773 *
1774 * TODO: eventually we should remove pci_disable_device() /
1775	 * pci_enable_device() from suspend/resume. Due to how they
1776 * depend on the device enable refcount we can't anyway depend on them
1777 * disabling/enabling the device.
1778 */
David Weinehall52a05c32016-08-22 13:32:44 +03001779 if (pci_enable_device(pdev)) {
Imre Deakbc872292015-11-18 17:32:30 +02001780 ret = -EIO;
1781 goto out;
1782 }
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001783
David Weinehall52a05c32016-08-22 13:32:44 +03001784 pci_set_master(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001785
Imre Deak1f814da2015-12-16 02:52:19 +02001786 disable_rpm_wakeref_asserts(dev_priv);
1787
Wayne Boyer666a4532015-12-09 12:29:35 -08001788 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Paulo Zanoni1a5df182014-10-27 17:54:32 -02001789 ret = vlv_resume_prepare(dev_priv, false);
Imre Deak36d61e62014-10-23 19:23:24 +03001790 if (ret)
Damien Lespiauff0b1872015-05-20 14:45:15 +01001791 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
1792 ret);
Imre Deak36d61e62014-10-23 19:23:24 +03001793
Hans de Goede68f60942017-02-10 11:28:01 +01001794 intel_uncore_resume_early(dev_priv);
Paulo Zanoniefee8332014-10-27 17:54:33 -02001795
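	/*
	 * Undo the platform specific low power state entered in
	 * i915_drm_suspend_late(): leave DC9 on GEN9 LP, PC8 on HSW/BDW.
	 */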
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02001796 if (IS_GEN9_LP(dev_priv)) {
Imre Deakda2f41d2016-04-20 20:27:56 +03001797 if (!dev_priv->suspended_to_idle)
1798 gen9_sanitize_dc_state(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03001799 bxt_disable_dc9(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03001800 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Damien Lespiaua9a6b732015-05-20 14:45:14 +01001801 hsw_disable_pc8(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03001802 }
Paulo Zanoniefee8332014-10-27 17:54:33 -02001803
Chris Wilsondc979972016-05-10 14:10:04 +01001804 intel_uncore_sanitize(dev_priv);
Imre Deakbc872292015-11-18 17:32:30 +02001805
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02001806 if (IS_GEN9_LP(dev_priv) ||
Imre Deaka7c81252016-04-01 16:02:38 +03001807 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
Imre Deakbc872292015-11-18 17:32:30 +02001808 intel_power_domains_init_hw(dev_priv, true);
1809
Chris Wilson24145512017-01-24 11:01:35 +00001810 i915_gem_sanitize(dev_priv);
1811
Imre Deak6e35e8a2016-04-18 10:04:19 +03001812 enable_rpm_wakeref_asserts(dev_priv);
1813
Imre Deakbc872292015-11-18 17:32:30 +02001814out:
1815 dev_priv->suspended_to_idle = false;
Imre Deak36d61e62014-10-23 19:23:24 +03001816
1817 return ret;
Imre Deak76c4b252014-04-01 19:55:22 +03001818}
1819
Tvrtko Ursulin7f26cb82016-12-01 14:16:41 +00001820static int i915_resume_switcheroo(struct drm_device *dev)
Imre Deak76c4b252014-04-01 19:55:22 +03001821{
Imre Deak50a00722014-10-23 19:23:17 +03001822 int ret;
Imre Deak76c4b252014-04-01 19:55:22 +03001823
Imre Deak097dd832014-10-23 19:23:19 +03001824 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1825 return 0;
1826
Imre Deak5e365c32014-10-23 19:23:25 +03001827 ret = i915_drm_resume_early(dev);
Imre Deak50a00722014-10-23 19:23:17 +03001828 if (ret)
1829 return ret;
1830
Imre Deak5a175142014-10-23 19:23:18 +03001831 return i915_drm_resume(dev);
1832}
1833
Ben Gamari11ed50e2009-09-14 17:48:45 -04001834/**
Eugeni Dodonovf3953dc2011-11-28 16:15:17 -02001835 * i915_reset - reset chip after a hang
Michel Thierrydf210572017-01-11 20:18:09 -08001836 * @dev_priv: device private to reset
Ben Gamari11ed50e2009-09-14 17:48:45 -04001837 *
Chris Wilson780f2622016-09-09 14:11:52 +01001838 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
1839 * on failure.
Ben Gamari11ed50e2009-09-14 17:48:45 -04001840 *
Chris Wilson221fe792016-09-09 14:11:51 +01001841 * Caller must hold the struct_mutex.
1842 *
Ben Gamari11ed50e2009-09-14 17:48:45 -04001843 * Procedure is fairly simple:
1844 * - reset the chip using the reset reg
1845 * - re-init context state
1846 * - re-init hardware status page
1847 * - re-init ring buffer
1848 * - re-init interrupt state
1849 * - re-init display
1850 */
Chris Wilson780f2622016-09-09 14:11:52 +01001851void i915_reset(struct drm_i915_private *dev_priv)
Ben Gamari11ed50e2009-09-14 17:48:45 -04001852{
Chris Wilsond98c52c2016-04-13 17:35:05 +01001853 struct i915_gpu_error *error = &dev_priv->gpu_error;
Kenneth Graunke0573ed42010-09-11 03:17:19 -07001854 int ret;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001855
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001856 lockdep_assert_held(&dev_priv->drm.struct_mutex);
Chris Wilson8c185ec2017-03-16 17:13:02 +00001857 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
Chris Wilson221fe792016-09-09 14:11:51 +01001858
Chris Wilson8c185ec2017-03-16 17:13:02 +00001859 if (!test_bit(I915_RESET_HANDOFF, &error->flags))
Chris Wilson780f2622016-09-09 14:11:52 +01001860 return;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001861
Chris Wilsond98c52c2016-04-13 17:35:05 +01001862 /* Clear any previous failed attempts at recovery. Time to try again. */
Chris Wilson2e8f9d32017-03-16 17:13:04 +00001863 if (!i915_gem_unset_wedged(dev_priv))
1864 goto wakeup;
1865
Chris Wilson8af29b02016-09-09 14:11:47 +01001866 error->reset_count++;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001867
Chris Wilson7b4d3a12016-07-04 08:08:37 +01001868 pr_notice("drm/i915: Resetting chip after gpu hang\n");
Chris Wilson4c965542017-01-17 17:59:01 +02001869 disable_irq(dev_priv->drm.irq);
Chris Wilson0e178ae2017-01-17 17:59:06 +02001870 ret = i915_gem_reset_prepare(dev_priv);
1871 if (ret) {
1872 DRM_ERROR("GPU recovery failed\n");
1873 intel_gpu_reset(dev_priv, ALL_ENGINES);
1874 goto error;
1875 }
Chris Wilson9e60ab02016-10-04 21:11:28 +01001876
Chris Wilsondc979972016-05-10 14:10:04 +01001877 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
Kenneth Graunke0573ed42010-09-11 03:17:19 -07001878 if (ret) {
Chris Wilson804e59a2016-04-13 17:35:09 +01001879 if (ret != -ENODEV)
1880 DRM_ERROR("Failed to reset chip: %i\n", ret);
1881 else
1882 DRM_DEBUG_DRIVER("GPU reset disabled\n");
Chris Wilsond98c52c2016-04-13 17:35:05 +01001883 goto error;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001884 }
1885
Chris Wilsond8027092017-02-08 14:30:32 +00001886 i915_gem_reset(dev_priv);
Ville Syrjälä1362b772014-11-26 17:07:29 +02001887 intel_overlay_reset(dev_priv);
1888
Ben Gamari11ed50e2009-09-14 17:48:45 -04001889 /* Ok, now get things going again... */
1890
1891 /*
1892 * Everything depends on having the GTT running, so we need to start
1893 * there. Fortunately we don't need to do this unless we reset the
1894 * chip at a PCI level.
1895 *
1896 * Next we need to restore the context, but we don't use those
1897 * yet either...
1898 *
1899 * Ring buffer needs to be re-initialized in the KMS case, or if X
1900 * was running at the time of the reset (i.e. we weren't VT
1901 * switched away).
1902 */
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001903 ret = i915_gem_init_hw(dev_priv);
Daniel Vetter33d30a92015-02-23 12:03:27 +01001904 if (ret) {
1905 DRM_ERROR("Failed hw init on reset %d\n", ret);
Chris Wilsond98c52c2016-04-13 17:35:05 +01001906 goto error;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001907 }
1908
Chris Wilsonc2a126a2016-11-22 14:41:19 +00001909 i915_queue_hangcheck(dev_priv);
1910
Chris Wilson2e8f9d32017-03-16 17:13:04 +00001911finish:
Chris Wilson8d613c52017-02-12 17:19:59 +00001912 i915_gem_reset_finish(dev_priv);
Chris Wilson4c965542017-01-17 17:59:01 +02001913 enable_irq(dev_priv->drm.irq);
Chris Wilson8c185ec2017-03-16 17:13:02 +00001914
Chris Wilson2e8f9d32017-03-16 17:13:04 +00001915wakeup:
Chris Wilson8c185ec2017-03-16 17:13:02 +00001916 clear_bit(I915_RESET_HANDOFF, &error->flags);
1917 wake_up_bit(&error->flags, I915_RESET_HANDOFF);
Chris Wilson780f2622016-09-09 14:11:52 +01001918 return;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001919
1920error:
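	/*
	 * Recovery failed: mark the GPU as wedged and retire the outstanding
	 * requests so that waiters are not left hanging, then take the
	 * common finish path.
	 */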
Chris Wilson821ed7d2016-09-09 14:11:53 +01001921 i915_gem_set_wedged(dev_priv);
Chris Wilson36703e72017-06-22 11:56:25 +01001922 i915_gem_retire_requests(dev_priv);
Chris Wilson2e8f9d32017-03-16 17:13:04 +00001923 goto finish;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001924}
1925
Michel Thierry142bc7d2017-06-20 10:57:46 +01001926/**
1927 * i915_reset_engine - reset GPU engine to recover from a hang
1928 * @engine: engine to reset
1929 *
1930 * Reset a specific GPU engine. Useful if a hang is detected.
1931 * Returns zero on successful reset or otherwise an error code.
Michel Thierrya1ef70e2017-06-20 10:57:47 +01001932 *
1933 * Procedure is:
1934 * - identify the request that caused the hang and drop it
1935 * - reset engine (which will force the engine to idle)
1936 * - re-init/configure engine
Michel Thierry142bc7d2017-06-20 10:57:46 +01001937 */
1938int i915_reset_engine(struct intel_engine_cs *engine)
1939{
Michel Thierrya1ef70e2017-06-20 10:57:47 +01001940 struct i915_gpu_error *error = &engine->i915->gpu_error;
1941 struct drm_i915_gem_request *active_request;
1942 int ret;
1943
1944 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
1945
1946 DRM_DEBUG_DRIVER("resetting %s\n", engine->name);
1947
1948 active_request = i915_gem_reset_prepare_engine(engine);
1949 if (IS_ERR(active_request)) {
1950 DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
1951 ret = PTR_ERR(active_request);
1952 goto out;
1953 }
1954
1955 /*
1956	 * The request that caused the hang is stuck on elsp; we know the
1957	 * active request and can drop it. Adjust the head to skip the
1958	 * offending request and resume executing the remaining requests in the queue.
1959 */
1960 i915_gem_reset_engine(engine, active_request);
1961
1962 /* Finally, reset just this engine. */
1963 ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
1964
1965 i915_gem_reset_finish_engine(engine);
1966
1967 if (ret) {
1968 /* If we fail here, we expect to fallback to a global reset */
1969 DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
1970 engine->name, ret);
1971 goto out;
1972 }
1973
1974 /*
1975 * The engine and its registers (and workarounds in case of render)
1976 * have been reset to their default values. Follow the init_ring
1977 * process to program RING_MODE, HWSP and re-enable submission.
1978 */
1979 ret = engine->init_hw(engine);
Michel Thierry702c8f82017-06-20 10:57:48 +01001980 if (ret)
1981 goto out;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01001982
Michel Thierry702c8f82017-06-20 10:57:48 +01001983 error->reset_engine_count[engine->id]++;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01001984out:
1985 return ret;
Michel Thierry142bc7d2017-06-20 10:57:46 +01001986}
1987
David Weinehallc49d13e2016-08-22 13:32:42 +03001988static int i915_pm_suspend(struct device *kdev)
Kristian Høgsberg112b7152009-01-04 16:55:33 -05001989{
David Weinehallc49d13e2016-08-22 13:32:42 +03001990 struct pci_dev *pdev = to_pci_dev(kdev);
1991 struct drm_device *dev = pci_get_drvdata(pdev);
Kristian Høgsberg112b7152009-01-04 16:55:33 -05001992
David Weinehallc49d13e2016-08-22 13:32:42 +03001993 if (!dev) {
1994 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001995 return -ENODEV;
1996 }
Kristian Høgsberg112b7152009-01-04 16:55:33 -05001997
David Weinehallc49d13e2016-08-22 13:32:42 +03001998 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Dave Airlie5bcf7192010-12-07 09:20:40 +10001999 return 0;
2000
David Weinehallc49d13e2016-08-22 13:32:42 +03002001 return i915_drm_suspend(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03002002}
2003
David Weinehallc49d13e2016-08-22 13:32:42 +03002004static int i915_pm_suspend_late(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03002005{
David Weinehallc49d13e2016-08-22 13:32:42 +03002006 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03002007
2008 /*
Damien Lespiauc965d9952015-05-18 19:53:48 +01002009 * We have a suspend ordering issue with the snd-hda driver also
Imre Deak76c4b252014-04-01 19:55:22 +03002010	 * requiring our device to be powered up. Due to the lack of a
2011	 * parent/child relationship we currently solve this with a late
2012 * suspend hook.
2013 *
2014 * FIXME: This should be solved with a special hdmi sink device or
2015 * similar so that power domains can be employed.
2016 */
David Weinehallc49d13e2016-08-22 13:32:42 +03002017 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak76c4b252014-04-01 19:55:22 +03002018 return 0;
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002019
David Weinehallc49d13e2016-08-22 13:32:42 +03002020 return i915_drm_suspend_late(dev, false);
Imre Deakab3be732015-03-02 13:04:41 +02002021}
2022
David Weinehallc49d13e2016-08-22 13:32:42 +03002023static int i915_pm_poweroff_late(struct device *kdev)
Imre Deakab3be732015-03-02 13:04:41 +02002024{
David Weinehallc49d13e2016-08-22 13:32:42 +03002025 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deakab3be732015-03-02 13:04:41 +02002026
David Weinehallc49d13e2016-08-22 13:32:42 +03002027 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deakab3be732015-03-02 13:04:41 +02002028 return 0;
2029
David Weinehallc49d13e2016-08-22 13:32:42 +03002030 return i915_drm_suspend_late(dev, true);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002031}
2032
David Weinehallc49d13e2016-08-22 13:32:42 +03002033static int i915_pm_resume_early(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03002034{
David Weinehallc49d13e2016-08-22 13:32:42 +03002035 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03002036
David Weinehallc49d13e2016-08-22 13:32:42 +03002037 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03002038 return 0;
2039
David Weinehallc49d13e2016-08-22 13:32:42 +03002040 return i915_drm_resume_early(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03002041}
2042
David Weinehallc49d13e2016-08-22 13:32:42 +03002043static int i915_pm_resume(struct device *kdev)
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002044{
David Weinehallc49d13e2016-08-22 13:32:42 +03002045 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002046
David Weinehallc49d13e2016-08-22 13:32:42 +03002047 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03002048 return 0;
2049
David Weinehallc49d13e2016-08-22 13:32:42 +03002050 return i915_drm_resume(dev);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002051}
2052
Chris Wilson1f19ac22016-05-14 07:26:32 +01002053/* freeze: before creating the hibernation_image */
David Weinehallc49d13e2016-08-22 13:32:42 +03002054static int i915_pm_freeze(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002055{
Chris Wilson6a800ea2016-09-21 14:51:07 +01002056 int ret;
2057
2058 ret = i915_pm_suspend(kdev);
2059 if (ret)
2060 return ret;
2061
2062 ret = i915_gem_freeze(kdev_to_i915(kdev));
2063 if (ret)
2064 return ret;
2065
2066 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01002067}
2068
David Weinehallc49d13e2016-08-22 13:32:42 +03002069static int i915_pm_freeze_late(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002070{
Chris Wilson461fb992016-05-14 07:26:33 +01002071 int ret;
2072
David Weinehallc49d13e2016-08-22 13:32:42 +03002073 ret = i915_pm_suspend_late(kdev);
Chris Wilson461fb992016-05-14 07:26:33 +01002074 if (ret)
2075 return ret;
2076
David Weinehallc49d13e2016-08-22 13:32:42 +03002077 ret = i915_gem_freeze_late(kdev_to_i915(kdev));
Chris Wilson461fb992016-05-14 07:26:33 +01002078 if (ret)
2079 return ret;
2080
2081 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01002082}
2083
2084/* thaw: called after creating the hibernation image, but before turning off. */
David Weinehallc49d13e2016-08-22 13:32:42 +03002085static int i915_pm_thaw_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002086{
David Weinehallc49d13e2016-08-22 13:32:42 +03002087 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002088}
2089
David Weinehallc49d13e2016-08-22 13:32:42 +03002090static int i915_pm_thaw(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002091{
David Weinehallc49d13e2016-08-22 13:32:42 +03002092 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002093}
2094
2095/* restore: called after loading the hibernation image. */
David Weinehallc49d13e2016-08-22 13:32:42 +03002096static int i915_pm_restore_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002097{
David Weinehallc49d13e2016-08-22 13:32:42 +03002098 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002099}
2100
David Weinehallc49d13e2016-08-22 13:32:42 +03002101static int i915_pm_restore(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002102{
David Weinehallc49d13e2016-08-22 13:32:42 +03002103 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002104}
2105
Imre Deakddeea5b2014-05-05 15:19:56 +03002106/*
2107 * Save all Gunit registers that may be lost after a D3 and a subsequent
2108 * S0i[R123] transition. The list of registers needing a save/restore is
2109 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2110 * registers in the following way:
2111 * - Driver: saved/restored by the driver
2112 * - Punit : saved/restored by the Punit firmware
2113 * - No, w/o marking: no need to save/restore, since the register is R/O or
2114 * used internally by the HW in a way that doesn't depend
2115 * keeping the content across a suspend/resume.
2116 * - Debug : used for debugging
2117 *
2118 * We save/restore all registers marked with 'Driver', with the following
2119 * exceptions:
2120 * - Registers out of use, including also registers marked with 'Debug'.
2121 * These have no effect on the driver's operation, so we don't save/restore
2122 * them to reduce the overhead.
2123 * - Registers that are fully setup by an initialization function called from
2124 * the resume path. For example many clock gating and RPS/RC6 registers.
2125 * - Registers that provide the right functionality with their reset defaults.
2126 *
2127 * TODO: Except for registers that based on the above 3 criteria can be safely
2128 * ignored, we save/restore all others, practically treating the HW context as
2129 * a black-box for the driver. Further investigation is needed to reduce the
2130 * saved/restored registers even further, by following the same 3 criteria.
2131 */
2132static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2133{
2134 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2135 int i;
2136
2137 /* GAM 0x4000-0x4770 */
2138 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
2139 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
2140 s->arb_mode = I915_READ(ARB_MODE);
2141 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
2142 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
2143
2144 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002145 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
Imre Deakddeea5b2014-05-05 15:19:56 +03002146
2147 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
Imre Deakb5f1c972015-04-15 16:52:30 -07002148 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
Imre Deakddeea5b2014-05-05 15:19:56 +03002149
2150 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
2151 s->ecochk = I915_READ(GAM_ECOCHK);
2152 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
2153 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
2154
2155 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
2156
2157 /* MBC 0x9024-0x91D0, 0x8500 */
2158 s->g3dctl = I915_READ(VLV_G3DCTL);
2159 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
2160 s->mbctl = I915_READ(GEN6_MBCTL);
2161
2162 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2163 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
2164 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
2165 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
2166 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
2167 s->rstctl = I915_READ(GEN6_RSTCTL);
2168 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
2169
2170 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2171 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
2172 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
2173 s->rpdeuc = I915_READ(GEN6_RPDEUC);
2174 s->ecobus = I915_READ(ECOBUS);
2175 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
2176 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
2177 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
2178 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
2179 s->rcedata = I915_READ(VLV_RCEDATA);
2180 s->spare2gh = I915_READ(VLV_SPAREG2H);
2181
2182 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2183 s->gt_imr = I915_READ(GTIMR);
2184 s->gt_ier = I915_READ(GTIER);
2185 s->pm_imr = I915_READ(GEN6_PMIMR);
2186 s->pm_ier = I915_READ(GEN6_PMIER);
2187
2188 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002189 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
Imre Deakddeea5b2014-05-05 15:19:56 +03002190
2191 /* GT SA CZ domain, 0x100000-0x138124 */
2192 s->tilectl = I915_READ(TILECTL);
2193 s->gt_fifoctl = I915_READ(GTFIFOCTL);
2194 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
2195 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2196 s->pmwgicz = I915_READ(VLV_PMWGICZ);
2197
2198 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2199 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
2200 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
Jesse Barnes9c252102015-04-01 14:22:57 -07002201 s->pcbr = I915_READ(VLV_PCBR);
Imre Deakddeea5b2014-05-05 15:19:56 +03002202 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
2203
2204 /*
2205 * Not saving any of:
2206 * DFT, 0x9800-0x9EC0
2207 * SARB, 0xB000-0xB1FC
2208 * GAC, 0x5208-0x524C, 0x14000-0x14C000
2209 * PCI CFG
2210 */
2211}
2212
2213static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2214{
2215 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2216 u32 val;
2217 int i;
2218
2219 /* GAM 0x4000-0x4770 */
2220 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
2221 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
2222 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
2223 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
2224 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
2225
2226 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002227 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
Imre Deakddeea5b2014-05-05 15:19:56 +03002228
2229 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
Imre Deakb5f1c972015-04-15 16:52:30 -07002230 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
Imre Deakddeea5b2014-05-05 15:19:56 +03002231
2232 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
2233 I915_WRITE(GAM_ECOCHK, s->ecochk);
2234 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
2235 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
2236
2237 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
2238
2239 /* MBC 0x9024-0x91D0, 0x8500 */
2240 I915_WRITE(VLV_G3DCTL, s->g3dctl);
2241 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
2242 I915_WRITE(GEN6_MBCTL, s->mbctl);
2243
2244 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2245 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
2246 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
2247 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
2248 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
2249 I915_WRITE(GEN6_RSTCTL, s->rstctl);
2250 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
2251
2252 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2253 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
2254 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
2255 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
2256 I915_WRITE(ECOBUS, s->ecobus);
2257 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
2258 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
2259 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
2260 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
2261 I915_WRITE(VLV_RCEDATA, s->rcedata);
2262 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
2263
2264 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2265 I915_WRITE(GTIMR, s->gt_imr);
2266 I915_WRITE(GTIER, s->gt_ier);
2267 I915_WRITE(GEN6_PMIMR, s->pm_imr);
2268 I915_WRITE(GEN6_PMIER, s->pm_ier);
2269
2270 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002271 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
Imre Deakddeea5b2014-05-05 15:19:56 +03002272
2273 /* GT SA CZ domain, 0x100000-0x138124 */
2274 I915_WRITE(TILECTL, s->tilectl);
2275 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
2276 /*
2277	 * Preserve the GT allow wake and GFX force clock bit; they are not
2278	 * to be restored, as they are used to control the s0ix suspend/resume
2279 * sequence by the caller.
2280 */
2281 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2282 val &= VLV_GTLC_ALLOWWAKEREQ;
2283 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2284 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2285
2286 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2287 val &= VLV_GFX_CLK_FORCE_ON_BIT;
2288 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2289 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2290
2291 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
2292
2293 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2294 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
2295 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
Jesse Barnes9c252102015-04-01 14:22:57 -07002296 I915_WRITE(VLV_PCBR, s->pcbr);
Imre Deakddeea5b2014-05-05 15:19:56 +03002297 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
2298}
2299
Chris Wilson3dd14c02017-04-21 14:58:15 +01002300static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
2301 u32 mask, u32 val)
2302{
2303 /* The HW does not like us polling for PW_STATUS frequently, so
2304 * use the sleeping loop rather than risk the busy spin within
2305 * intel_wait_for_register().
2306 *
2307 * Transitioning between RC6 states should be at most 2ms (see
2308 * valleyview_enable_rps) so use a 3ms timeout.
2309 */
2310 return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
2311 3);
2312}
2313
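/*
 * Force the GFX clock on (or release the force) and, when forcing it on,
 * wait for the clock status bit to acknowledge.
 */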
Imre Deak650ad972014-04-18 16:35:02 +03002314int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
2315{
2316 u32 val;
2317 int err;
2318
Imre Deak650ad972014-04-18 16:35:02 +03002319 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2320 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
2321 if (force_on)
2322 val |= VLV_GFX_CLK_FORCE_ON_BIT;
2323 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2324
2325 if (!force_on)
2326 return 0;
2327
Chris Wilsonc6ddc5f2016-06-30 15:32:46 +01002328 err = intel_wait_for_register(dev_priv,
2329 VLV_GTLC_SURVIVABILITY_REG,
2330 VLV_GFX_CLK_STATUS_BIT,
2331 VLV_GFX_CLK_STATUS_BIT,
2332 20);
Imre Deak650ad972014-04-18 16:35:02 +03002333 if (err)
2334 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2335 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2336
2337 return err;
Imre Deak650ad972014-04-18 16:35:02 +03002338}
2339
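/* Allow or disallow GT wake requests and wait for the ACK bit in PW_STATUS to follow. */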
Imre Deakddeea5b2014-05-05 15:19:56 +03002340static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
2341{
Chris Wilson3dd14c02017-04-21 14:58:15 +01002342 u32 mask;
Imre Deakddeea5b2014-05-05 15:19:56 +03002343 u32 val;
Chris Wilson3dd14c02017-04-21 14:58:15 +01002344 int err;
Imre Deakddeea5b2014-05-05 15:19:56 +03002345
2346 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2347 val &= ~VLV_GTLC_ALLOWWAKEREQ;
2348 if (allow)
2349 val |= VLV_GTLC_ALLOWWAKEREQ;
2350 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2351 POSTING_READ(VLV_GTLC_WAKE_CTRL);
2352
Chris Wilson3dd14c02017-04-21 14:58:15 +01002353 mask = VLV_GTLC_ALLOWWAKEACK;
2354 val = allow ? mask : 0;
2355
2356 err = vlv_wait_for_pw_status(dev_priv, mask, val);
Imre Deakddeea5b2014-05-05 15:19:56 +03002357 if (err)
2358 DRM_ERROR("timeout disabling GT waking\n");
Chris Wilsonb2736692016-06-30 15:32:47 +01002359
Imre Deakddeea5b2014-05-05 15:19:56 +03002360 return err;
Imre Deakddeea5b2014-05-05 15:19:56 +03002361}
2362
Chris Wilson3dd14c02017-04-21 14:58:15 +01002363static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
2364 bool wait_for_on)
Imre Deakddeea5b2014-05-05 15:19:56 +03002365{
2366 u32 mask;
2367 u32 val;
Imre Deakddeea5b2014-05-05 15:19:56 +03002368
2369 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2370 val = wait_for_on ? mask : 0;
Imre Deakddeea5b2014-05-05 15:19:56 +03002371
2372 /*
2373 * RC6 transitioning can be delayed up to 2 msec (see
2374 * valleyview_enable_rps), use 3 msec for safety.
2375 */
Chris Wilson3dd14c02017-04-21 14:58:15 +01002376 if (vlv_wait_for_pw_status(dev_priv, mask, val))
Imre Deakddeea5b2014-05-05 15:19:56 +03002377 DRM_ERROR("timeout waiting for GT wells to go %s\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02002378 onoff(wait_for_on));
Imre Deakddeea5b2014-05-05 15:19:56 +03002379}
2380
2381static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
2382{
2383 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
2384 return;
2385
Daniel Vetter6fa283b2016-01-19 21:00:56 +01002386 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
Imre Deakddeea5b2014-05-05 15:19:56 +03002387 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
2388}
2389
Sagar Kambleebc32822014-08-13 23:07:05 +05302390static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
Imre Deakddeea5b2014-05-05 15:19:56 +03002391{
2392 u32 mask;
2393 int err;
2394
2395 /*
2396 * Bspec defines the following GT well on flags as debug only, so
2397 * don't treat them as hard failures.
2398 */
Chris Wilson3dd14c02017-04-21 14:58:15 +01002399 vlv_wait_for_gt_wells(dev_priv, false);
Imre Deakddeea5b2014-05-05 15:19:56 +03002400
2401 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2402 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2403
2404 vlv_check_no_gt_access(dev_priv);
2405
2406 err = vlv_force_gfx_clock(dev_priv, true);
2407 if (err)
2408 goto err1;
2409
2410 err = vlv_allow_gt_wake(dev_priv, false);
2411 if (err)
2412 goto err2;
Deepak S98711162014-12-12 14:18:16 +05302413
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002414 if (!IS_CHERRYVIEW(dev_priv))
Deepak S98711162014-12-12 14:18:16 +05302415 vlv_save_gunit_s0ix_state(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002416
2417 err = vlv_force_gfx_clock(dev_priv, false);
2418 if (err)
2419 goto err2;
2420
2421 return 0;
2422
2423err2:
2424 /* For safety always re-enable waking and disable gfx clock forcing */
2425 vlv_allow_gt_wake(dev_priv, true);
2426err1:
2427 vlv_force_gfx_clock(dev_priv, false);
2428
2429 return err;
2430}
2431
Sagar Kamble016970b2014-08-13 23:07:06 +05302432static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2433 bool rpm_resume)
Imre Deakddeea5b2014-05-05 15:19:56 +03002434{
Imre Deakddeea5b2014-05-05 15:19:56 +03002435 int err;
2436 int ret;
2437
2438 /*
2439	 * If any of the steps fail, just try to continue; that's the best we
2440 * can do at this point. Return the first error code (which will also
2441 * leave RPM permanently disabled).
2442 */
2443 ret = vlv_force_gfx_clock(dev_priv, true);
2444
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002445 if (!IS_CHERRYVIEW(dev_priv))
Deepak S98711162014-12-12 14:18:16 +05302446 vlv_restore_gunit_s0ix_state(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002447
2448 err = vlv_allow_gt_wake(dev_priv, true);
2449 if (!ret)
2450 ret = err;
2451
2452 err = vlv_force_gfx_clock(dev_priv, false);
2453 if (!ret)
2454 ret = err;
2455
2456 vlv_check_no_gt_access(dev_priv);
2457
Chris Wilson7c108fd2016-10-24 13:42:18 +01002458 if (rpm_resume)
Ville Syrjälä46f16e62016-10-31 22:37:22 +02002459 intel_init_clock_gating(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002460
2461 return ret;
2462}
2463
static int intel_runtime_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_guc_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = 0;
	if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_suspend(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3),
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

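/*
 * Runtime PM resume callback, the inverse of intel_runtime_suspend(). Leave
 * the platform low power state, restore swizzling and fence registers,
 * re-enable interrupts and, where needed, reinitialise hotplug detection.
 */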
static int intel_runtime_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev_priv);

	if (IS_GEN9_LP(dev_priv)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev_priv);
	i915_gem_restore_fences(dev_priv);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else, do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

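/*
 * Dispatch table for all of the driver's power management entry points:
 * system sleep (S0ix via system suspend, S3, S4) as well as runtime PM.
 */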
const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

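/* vm_ops for userspace mmaps of GEM objects; faults land in i915_gem_fault() */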
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

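/*
 * File operations for the DRM device nodes: mostly generic DRM helpers,
 * with only the 32-bit compat ioctl handler being i915 specific.
 */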
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

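/*
 * The legacy I915_GEM_PIN/UNPIN ioctls are no longer supported and are
 * rejected with -ENODEV.
 */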
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

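/*
 * The ioctl table: the obsolete legacy entries are wired to drm_noop, while
 * the rest dispatch to the GEM, context, display and perf implementations.
 */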
static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

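/* Pull in the mock DRM helpers used by the i915 selftests when enabled. */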
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif