/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <drm/drm_crtc_helper.h>

static int i915_modeset __read_mostly = -1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");

unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
		"-1=force lid closed, -2=force lid open)");

unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
		"Enable powersavings, fbc, downclocking, etc. (default: true)");

int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
		"Enable power-saving render C-state 6. "
		"Different stages can be selected via bitmask values "
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
		"default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
		"Enable frame buffer compression for power savings "
		"(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
		"Use panel (LVDS/eDP) downclocking for power savings "
		"(default: false)");

int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
		"Specify LVDS channel mode "
		"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
		"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
		"Override/Ignore selection of SDVO panel mode in the VBT "
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

bool i915_enable_hangcheck __read_mostly = true;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "
		"(default: true)");

int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

unsigned int i915_preliminary_hw_support __read_mostly = 0;
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
		"Enable preliminary hardware support. (default: false)");

int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
		"Disable the power well when possible (default: false)");

int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");

static struct drm_driver driver;
extern int intel_agp_enabled;

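/*
 * PCI match-table helpers: each entry matches an Intel display-class
 * device by PCI ID and stashes a pointer to its intel_device_info in
 * driver_data, so the probe path knows which features the chip has.
 */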
#define INTEL_VGA_DEVICE(id, info) {		\
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
	.class_mask = 0xff0000,			\
	.vendor = 0x8086,			\
	.device = id,				\
	.subvendor = PCI_ANY_ID,		\
	.subdevice = PCI_ANY_ID,		\
	.driver_data = (unsigned long) info }

#define INTEL_QUANTA_VGA_DEVICE(info) {		\
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
	.class_mask = 0xff0000,			\
	.vendor = 0x8086,			\
	.device = 0x16a,			\
	.subvendor = 0x152d,			\
	.subdevice = 0x8990,			\
	.driver_data = (unsigned long) info }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

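/*
 * Baseline feature set shared by all gen7 parts.  Individual entries
 * below may override single fields; with designated initializers the
 * last assignment wins (see the num_pipes/has_llc overrides further
 * down).
 */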
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_bsd_ring = 1, \
	.has_blt_ring = 1, \
	.has_llc = 1, \
	.has_force_wake = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	.has_fbc = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_vebox_ring = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	.has_vebox_ring = 1,
};

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

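/*
 * Identify the PCH (the "south display" companion chip) by probing the
 * ISA bridge, then record its type and the number of shared PCH PLLs
 * it provides.
 */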
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		dev_priv->num_pch_pll = 0;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 */
	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (pch) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id;
			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			}
			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
		}
		pci_dev_put(pch);
	}
}

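/*
 * Decide whether inter-ring semaphores are used: they are unavailable
 * before gen6; otherwise an explicit module-parameter override wins,
 * and by default they are kept off only on gen6 when the IOMMU has GFX
 * remapping active.
 */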
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915_semaphores >= 0)
		return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

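/*
 * Core freeze path: quiesce the GPU and display (idle GEM, tear down
 * interrupts, disable the CRTCs while preserving their software state
 * for _thaw), then save register state and release the opregion and
 * fbdev before the device is powered down.
 */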
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_set_power_well(dev, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);

		intel_modeset_suspend_hw(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

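/*
 * Deferred fbdev resume: __i915_drm_thaw() punts the fbdev wake-up to
 * this work item when it cannot take the console lock immediately, so
 * that a contended console lock does not stall the resume path.
 */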
void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}

static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);
	return error;
}

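/*
 * Thaw path: reset the GT state and restore the GTT mappings before
 * handing over to the common __i915_drm_thaw() re-initialisation.
 */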
static int i915_drm_thaw(struct drm_device *dev)
{
	int error = 0;

	intel_gt_reset(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	__i915_drm_thaw(dev);

	return error;
}

int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	intel_gt_reset(dev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    !dev_priv->opregion.header) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	ret = __i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

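/*
 * Per-generation GPU reset helpers.  Each one asserts the reset domain
 * bits appropriate for its hardware and then waits for (or gives) the
 * hardware time to complete the reset before returning.
 */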
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 762 | static int i965_do_reset(struct drm_device *dev) |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 763 | { |
Daniel Vetter | 5ccce18 | 2012-04-27 15:17:45 +0200 | [diff] [blame] | 764 | int ret; |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 765 | u8 gdrst; |
| 766 | |
Chris Wilson | ae681d9 | 2010-10-01 14:57:56 +0100 | [diff] [blame] | 767 | /* |
| 768 | * Set the domains we want to reset (GRDOM/bits 2 and 3) as |
| 769 | * well as the reset bit (GR/bit 0). Setting the GR bit |
| 770 | * triggers the reset; when done, the hardware will clear it. |
| 771 | */ |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 772 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); |
Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 773 | pci_write_config_byte(dev->pdev, I965_GDRST, |
Daniel Vetter | 5ccce18 | 2012-04-27 15:17:45 +0200 | [diff] [blame] | 774 | gdrst | GRDOM_RENDER | |
| 775 | GRDOM_RESET_ENABLE); |
| 776 | ret = wait_for(i965_reset_complete(dev), 500); |
| 777 | if (ret) |
| 778 | return ret; |
| 779 | |
| 780 | /* We can't reset render&media without also resetting display ... */ |
| 781 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); |
| 782 | pci_write_config_byte(dev->pdev, I965_GDRST, |
| 783 | gdrst | GRDOM_MEDIA | |
| 784 | GRDOM_RESET_ENABLE); |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 785 | |
| 786 | return wait_for(i965_reset_complete(dev), 500); |
| 787 | } |
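
/*
 * Illustrative aside (not in the original source): with the GR bit in bit 0
 * and the GRDOM domain field in bits 3:2, as the comment above describes,
 * and assuming GRDOM_RENDER = (1 << 2), GRDOM_MEDIA = (3 << 2) and
 * GRDOM_RESET_ENABLE = (1 << 0), the first config write above requests a
 * render-domain reset by setting bits 2 and 0, and i965_reset_complete()
 * simply polls for the hardware clearing bit 0 again.
 */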
| 788 | |
Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 789 | static int ironlake_do_reset(struct drm_device *dev) |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 790 | { |
| 791 | struct drm_i915_private *dev_priv = dev->dev_private; |
Daniel Vetter | 5ccce18 | 2012-04-27 15:17:45 +0200 | [diff] [blame] | 792 | u32 gdrst; |
| 793 | int ret; |
| 794 | |
| 795 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); |
Jesse Barnes | 8a5c2ae | 2013-03-28 13:57:19 -0700 | [diff] [blame] | 796 | gdrst &= ~GRDOM_MASK; |
Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 797 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, |
Daniel Vetter | 5ccce18 | 2012-04-27 15:17:45 +0200 | [diff] [blame] | 798 | gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); |
| 799 | ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); |
| 800 | if (ret) |
| 801 | return ret; |
| 802 | |
| 803 | /* We can't reset render&media without also resetting display ... */ |
| 804 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); |
Jesse Barnes | 8a5c2ae | 2013-03-28 13:57:19 -0700 | [diff] [blame] | 805 | gdrst &= ~GRDOM_MASK; |
Daniel Vetter | 5ccce18 | 2012-04-27 15:17:45 +0200 | [diff] [blame] | 806 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, |
| 807 | gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 808 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 809 | } |
| 810 | |
Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 811 | static int gen6_do_reset(struct drm_device *dev) |
Eric Anholt | cff458c | 2010-11-18 09:31:14 +0800 | [diff] [blame] | 812 | { |
| 813 | struct drm_i915_private *dev_priv = dev->dev_private; |
Keith Packard | b6e45f8 | 2012-01-06 11:34:04 -0800 | [diff] [blame] | 814 | int ret; |
| 815 | unsigned long irqflags; |
Eric Anholt | cff458c | 2010-11-18 09:31:14 +0800 | [diff] [blame] | 816 | |
Keith Packard | 286fed4 | 2012-01-06 11:44:11 -0800 | [diff] [blame] |  817 | 	/* Hold gt_lock across the reset to prevent any register access |
|  818 | 	 * while forcewake is not set up correctly. |
| 819 | */ |
Keith Packard | b6e45f8 | 2012-01-06 11:34:04 -0800 | [diff] [blame] | 820 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); |
Keith Packard | 286fed4 | 2012-01-06 11:44:11 -0800 | [diff] [blame] | 821 | |
| 822 | /* Reset the chip */ |
| 823 | |
|  824 | 	/* GEN6_GDRST is not in the gt power well, so there is no need to check |
|  825 | 	 * for fifo space for the write or to forcewake the chip for |
|  826 | 	 * the read. |
| 827 | */ |
| 828 | I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); |
| 829 | |
| 830 | /* Spin waiting for the device to ack the reset request */ |
| 831 | ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); |
| 832 | |
|  833 | 	/* If a user forcewake was held across the reset, restore it; otherwise turn forcewake off */ |
Keith Packard | b6e45f8 | 2012-01-06 11:34:04 -0800 | [diff] [blame] | 834 | if (dev_priv->forcewake_count) |
Chris Wilson | 990bbda | 2012-07-02 11:51:02 -0300 | [diff] [blame] | 835 | dev_priv->gt.force_wake_get(dev_priv); |
Keith Packard | 286fed4 | 2012-01-06 11:44:11 -0800 | [diff] [blame] | 836 | else |
Chris Wilson | 990bbda | 2012-07-02 11:51:02 -0300 | [diff] [blame] | 837 | dev_priv->gt.force_wake_put(dev_priv); |
Keith Packard | 286fed4 | 2012-01-06 11:44:11 -0800 | [diff] [blame] | 838 | |
| 839 | /* Restore fifo count */ |
| 840 | dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
| 841 | |
Keith Packard | b6e45f8 | 2012-01-06 11:34:04 -0800 | [diff] [blame] | 842 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); |
| 843 | return ret; |
Eric Anholt | cff458c | 2010-11-18 09:31:14 +0800 | [diff] [blame] | 844 | } |
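
/*
 * Illustrative aside (not in the original source): GEN6_GDRST here is an
 * MMIO register rather than a PCI config byte, and GEN6_GRDOM_FULL is
 * assumed to request a reset of all GT domains at once, which is why the
 * gen6+ path has no separate render/media steps like the gen4/gen5
 * functions above. The wait_for() poll succeeds once the hardware clears
 * the GRDOM_FULL bit to acknowledge the reset.
 */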
| 845 | |
Ben Widawsky | 8e96d9c | 2012-06-04 14:42:56 -0700 | [diff] [blame] | 846 | int intel_gpu_reset(struct drm_device *dev) |
Daniel Vetter | 350d270 | 2012-04-27 15:17:42 +0200 | [diff] [blame] | 847 | { |
Daniel Vetter | 350d270 | 2012-04-27 15:17:42 +0200 | [diff] [blame] | 848 | switch (INTEL_INFO(dev)->gen) { |
| 849 | case 7: |
Chris Wilson | 2e7c8ee | 2013-05-28 10:38:44 +0100 | [diff] [blame] | 850 | case 6: return gen6_do_reset(dev); |
| 851 | case 5: return ironlake_do_reset(dev); |
| 852 | case 4: return i965_do_reset(dev); |
| 853 | case 2: return i8xx_do_reset(dev); |
| 854 | default: return -ENODEV; |
Daniel Vetter | 350d270 | 2012-04-27 15:17:42 +0200 | [diff] [blame] | 855 | } |
Daniel Vetter | 350d270 | 2012-04-27 15:17:42 +0200 | [diff] [blame] | 856 | } |
| 857 | |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 858 | /** |
Eugeni Dodonov | f3953dc | 2011-11-28 16:15:17 -0200 | [diff] [blame] | 859 | * i915_reset - reset chip after a hang |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 860 | * @dev: drm device to reset |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 861 | * |
| 862 | * Reset the chip. Useful if a hang is detected. Returns zero on successful |
| 863 | * reset or otherwise an error code. |
| 864 | * |
| 865 | * Procedure is fairly simple: |
| 866 | * - reset the chip using the reset reg |
| 867 | * - re-init context state |
| 868 | * - re-init hardware status page |
| 869 | * - re-init ring buffer |
| 870 | * - re-init interrupt state |
| 871 | * - re-init display |
| 872 | */ |
Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 873 | int i915_reset(struct drm_device *dev) |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 874 | { |
| 875 | drm_i915_private_t *dev_priv = dev->dev_private; |
Chris Wilson | 2e7c8ee | 2013-05-28 10:38:44 +0100 | [diff] [blame] | 876 | bool simulated; |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 877 | int ret; |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 878 | |
Chris Wilson | d78cb50 | 2010-12-23 13:33:15 +0000 | [diff] [blame] | 879 | if (!i915_try_reset) |
| 880 | return 0; |
| 881 | |
Daniel Vetter | d54a02c | 2012-07-04 22:18:39 +0200 | [diff] [blame] | 882 | mutex_lock(&dev->struct_mutex); |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 883 | |
Chris Wilson | 069efc1 | 2010-09-30 16:53:18 +0100 | [diff] [blame] | 884 | i915_gem_reset(dev); |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 885 | |
Chris Wilson | 2e7c8ee | 2013-05-28 10:38:44 +0100 | [diff] [blame] | 886 | simulated = dev_priv->gpu_error.stop_rings != 0; |
| 887 | |
| 888 | if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) { |
Chris Wilson | ae681d9 | 2010-10-01 14:57:56 +0100 | [diff] [blame] | 889 | DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); |
Chris Wilson | 2e7c8ee | 2013-05-28 10:38:44 +0100 | [diff] [blame] | 890 | ret = -ENODEV; |
| 891 | } else { |
Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 892 | ret = intel_gpu_reset(dev); |
Daniel Vetter | 350d270 | 2012-04-27 15:17:42 +0200 | [diff] [blame] | 893 | |
Chris Wilson | 2e7c8ee | 2013-05-28 10:38:44 +0100 | [diff] [blame] | 894 | /* Also reset the gpu hangman. */ |
| 895 | if (simulated) { |
| 896 | DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); |
| 897 | dev_priv->gpu_error.stop_rings = 0; |
| 898 | if (ret == -ENODEV) { |
| 899 | DRM_ERROR("Reset not implemented, but ignoring " |
| 900 | "error for simulated gpu hangs\n"); |
| 901 | ret = 0; |
| 902 | } |
| 903 | } else |
| 904 | dev_priv->gpu_error.last_reset = get_seconds(); |
| 905 | } |
Kenneth Graunke | 0573ed4 | 2010-09-11 03:17:19 -0700 | [diff] [blame] | 906 | if (ret) { |
Chris Wilson | f803aa5 | 2010-09-19 12:38:26 +0100 | [diff] [blame] | 907 | DRM_ERROR("Failed to reset chip.\n"); |
Daniel J Blueman | f953c93 | 2010-05-17 14:23:52 +0100 | [diff] [blame] | 908 | mutex_unlock(&dev->struct_mutex); |
Chris Wilson | f803aa5 | 2010-09-19 12:38:26 +0100 | [diff] [blame] | 909 | return ret; |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 910 | } |
| 911 | |
| 912 | /* Ok, now get things going again... */ |
| 913 | |
| 914 | /* |
| 915 | * Everything depends on having the GTT running, so we need to start |
| 916 | * there. Fortunately we don't need to do this unless we reset the |
| 917 | * chip at a PCI level. |
| 918 | * |
|  919 | 	 * Next we need to restore the context, but we don't use contexts |
| 920 | * yet either... |
| 921 | * |
| 922 | * Ring buffer needs to be re-initialized in the KMS case, or if X |
| 923 | * was running at the time of the reset (i.e. we weren't VT |
| 924 | * switched away). |
| 925 | */ |
| 926 | if (drm_core_check_feature(dev, DRIVER_MODESET) || |
Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 927 | !dev_priv->mm.suspended) { |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 928 | struct intel_ring_buffer *ring; |
| 929 | int i; |
| 930 | |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 931 | dev_priv->mm.suspended = 0; |
Eric Anholt | 75a6898 | 2010-11-18 09:31:13 +0800 | [diff] [blame] | 932 | |
Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 933 | i915_gem_init_swizzling(dev); |
| 934 | |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 935 | for_each_ring(ring, dev_priv, i) |
| 936 | ring->init(ring); |
Eric Anholt | 75a6898 | 2010-11-18 09:31:13 +0800 | [diff] [blame] | 937 | |
Ben Widawsky | 254f965 | 2012-06-04 14:42:42 -0700 | [diff] [blame] | 938 | i915_gem_context_init(dev); |
Ben Widawsky | b7c36d2 | 2013-04-08 18:43:56 -0700 | [diff] [blame] | 939 | if (dev_priv->mm.aliasing_ppgtt) { |
| 940 | ret = dev_priv->mm.aliasing_ppgtt->enable(dev); |
| 941 | if (ret) |
| 942 | i915_gem_cleanup_aliasing_ppgtt(dev); |
| 943 | } |
Daniel Vetter | e21af88 | 2012-02-09 20:53:27 +0100 | [diff] [blame] | 944 | |
Daniel Vetter | 8e88a2b | 2012-06-19 18:40:00 +0200 | [diff] [blame] | 945 | /* |
| 946 | * It would make sense to re-init all the other hw state, at |
| 947 | * least the rps/rc6/emon init done within modeset_init_hw. For |
| 948 | * some unknown reason, this blows up my ilk, so don't. |
| 949 | */ |
Daniel Vetter | f817586 | 2012-04-10 15:50:11 +0200 | [diff] [blame] | 950 | |
Daniel Vetter | 8e88a2b | 2012-06-19 18:40:00 +0200 | [diff] [blame] | 951 | mutex_unlock(&dev->struct_mutex); |
Daniel Vetter | f817586 | 2012-04-10 15:50:11 +0200 | [diff] [blame] | 952 | |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 953 | drm_irq_uninstall(dev); |
| 954 | drm_irq_install(dev); |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 955 | intel_hpd_init(dev); |
Daniel Vetter | bcbc324 | 2012-04-27 15:17:41 +0200 | [diff] [blame] | 956 | } else { |
| 957 | mutex_unlock(&dev->struct_mutex); |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 958 | } |
| 959 | |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 960 | return 0; |
| 961 | } |
| 962 | |
Greg Kroah-Hartman | 56550d9 | 2012-12-21 15:09:25 -0800 | [diff] [blame] | 963 | static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 964 | { |
Daniel Vetter | 01a0685 | 2012-06-25 15:58:49 +0200 | [diff] [blame] | 965 | struct intel_device_info *intel_info = |
| 966 | (struct intel_device_info *) ent->driver_data; |
| 967 | |
Chris Wilson | 5fe49d8 | 2011-02-01 19:43:02 +0000 | [diff] [blame] | 968 | /* Only bind to function 0 of the device. Early generations |
|  969 | 	 * used function 1 as a placeholder for multi-head, which now only |
|  970 | 	 * causes confusion, especially on systems where both |
|  971 | 	 * functions have the same PCI ID! |
| 972 | */ |
| 973 | if (PCI_FUNC(pdev->devfn)) |
| 974 | return -ENODEV; |
| 975 | |
Daniel Vetter | 01a0685 | 2012-06-25 15:58:49 +0200 | [diff] [blame] | 976 | /* We've managed to ship a kms-enabled ddx that shipped with an XvMC |
| 977 | * implementation for gen3 (and only gen3) that used legacy drm maps |
| 978 | * (gasp!) to share buffers between X and the client. Hence we need to |
| 979 | * keep around the fake agp stuff for gen3, even when kms is enabled. */ |
| 980 | if (intel_info->gen != 3) { |
| 981 | driver.driver_features &= |
| 982 | ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP); |
| 983 | } else if (!intel_agp_enabled) { |
| 984 | DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); |
| 985 | return -ENODEV; |
| 986 | } |
| 987 | |
Jordan Crouse | dcdb167 | 2010-05-27 13:40:25 -0600 | [diff] [blame] | 988 | return drm_get_pci_dev(pdev, ent, &driver); |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 989 | } |
| 990 | |
| 991 | static void |
| 992 | i915_pci_remove(struct pci_dev *pdev) |
| 993 | { |
| 994 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 995 | |
| 996 | drm_put_dev(dev); |
| 997 | } |
| 998 | |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 999 | static int i915_pm_suspend(struct device *dev) |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 1000 | { |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1001 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1002 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
| 1003 | int error; |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 1004 | |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1005 | if (!drm_dev || !drm_dev->dev_private) { |
| 1006 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); |
| 1007 | return -ENODEV; |
| 1008 | } |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 1009 | |
Dave Airlie | 5bcf719 | 2010-12-07 09:20:40 +1000 | [diff] [blame] | 1010 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
| 1011 | return 0; |
| 1012 | |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1013 | error = i915_drm_freeze(drm_dev); |
| 1014 | if (error) |
| 1015 | return error; |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 1016 | |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1017 | pci_disable_device(pdev); |
| 1018 | pci_set_power_state(pdev, PCI_D3hot); |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 1019 | |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 1020 | return 0; |
| 1021 | } |
| 1022 | |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1023 | static int i915_pm_resume(struct device *dev) |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 1024 | { |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1025 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1026 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
| 1027 | |
| 1028 | return i915_resume(drm_dev); |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 1029 | } |
| 1030 | |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1031 | static int i915_pm_freeze(struct device *dev) |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 1032 | { |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1033 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1034 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
| 1035 | |
| 1036 | if (!drm_dev || !drm_dev->dev_private) { |
| 1037 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); |
| 1038 | return -ENODEV; |
| 1039 | } |
| 1040 | |
| 1041 | return i915_drm_freeze(drm_dev); |
| 1042 | } |
| 1043 | |
| 1044 | static int i915_pm_thaw(struct device *dev) |
| 1045 | { |
| 1046 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1047 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
| 1048 | |
| 1049 | return i915_drm_thaw(drm_dev); |
| 1050 | } |
| 1051 | |
| 1052 | static int i915_pm_poweroff(struct device *dev) |
| 1053 | { |
| 1054 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1055 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1056 | |
Rafael J. Wysocki | 61caf87 | 2010-02-18 23:06:27 +0100 | [diff] [blame] | 1057 | return i915_drm_freeze(drm_dev); |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 1058 | } |
| 1059 | |
Chris Wilson | b4b78d1 | 2010-06-06 15:40:20 +0100 | [diff] [blame] | 1060 | static const struct dev_pm_ops i915_pm_ops = { |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1061 | .suspend = i915_pm_suspend, |
| 1062 | .resume = i915_pm_resume, |
| 1063 | .freeze = i915_pm_freeze, |
| 1064 | .thaw = i915_pm_thaw, |
| 1065 | .poweroff = i915_pm_poweroff, |
| 1066 | .restore = i915_pm_resume, |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 1067 | }; |
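
/*
 * Orientation sketch (hedged, not from the original source): in the Linux
 * dev_pm_ops scheme, .suspend/.resume are used for suspend-to-RAM, while
 * .freeze/.thaw/.poweroff/.restore cover the hibernation sequence
 * (snapshot creation, writing the image, and resuming from it), which is
 * why several of the callbacks above can share the same
 * i915_drm_freeze()/i915_resume() paths.
 */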
| 1068 | |
Laurent Pinchart | 78b6855 | 2012-05-17 13:27:22 +0200 | [diff] [blame] | 1069 | static const struct vm_operations_struct i915_gem_vm_ops = { |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1070 | .fault = i915_gem_fault, |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1071 | .open = drm_gem_vm_open, |
| 1072 | .close = drm_gem_vm_close, |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1073 | }; |
| 1074 | |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 1075 | static const struct file_operations i915_driver_fops = { |
| 1076 | .owner = THIS_MODULE, |
| 1077 | .open = drm_open, |
| 1078 | .release = drm_release, |
| 1079 | .unlocked_ioctl = drm_ioctl, |
| 1080 | .mmap = drm_gem_mmap, |
| 1081 | .poll = drm_poll, |
| 1082 | .fasync = drm_fasync, |
| 1083 | .read = drm_read, |
| 1084 | #ifdef CONFIG_COMPAT |
| 1085 | .compat_ioctl = i915_compat_ioctl, |
| 1086 | #endif |
| 1087 | .llseek = noop_llseek, |
| 1088 | }; |
| 1089 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | static struct drm_driver driver = { |
Michael Witten | 0c54781 | 2011-08-25 17:55:54 +0000 | [diff] [blame] | 1091 | /* Don't use MTRRs here; the Xserver or userspace app should |
| 1092 | * deal with them for Intel hardware. |
Dave Airlie | 792d2b9 | 2005-11-11 23:30:27 +1100 | [diff] [blame] | 1093 | */ |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1094 | .driver_features = |
| 1095 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ |
Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 1096 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, |
Dave Airlie | 22eae94 | 2005-11-10 22:16:34 +1100 | [diff] [blame] | 1097 | .load = i915_driver_load, |
Jesse Barnes | ba8bbcf | 2007-11-22 14:14:14 +1000 | [diff] [blame] | 1098 | .unload = i915_driver_unload, |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1099 | .open = i915_driver_open, |
Dave Airlie | 22eae94 | 2005-11-10 22:16:34 +1100 | [diff] [blame] | 1100 | .lastclose = i915_driver_lastclose, |
| 1101 | .preclose = i915_driver_preclose, |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1102 | .postclose = i915_driver_postclose, |
Rafael J. Wysocki | d8e2920 | 2010-01-09 00:45:33 +0100 | [diff] [blame] | 1103 | |
| 1104 | /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ |
| 1105 | .suspend = i915_suspend, |
| 1106 | .resume = i915_resume, |
| 1107 | |
Dave Airlie | cda1738 | 2005-07-10 17:31:26 +1000 | [diff] [blame] | 1108 | .device_is_agp = i915_driver_device_is_agp, |
Dave Airlie | 7c1c287 | 2008-11-28 14:22:24 +1000 | [diff] [blame] | 1109 | .master_create = i915_master_create, |
| 1110 | .master_destroy = i915_master_destroy, |
Ben Gamari | 955b12d | 2009-02-17 20:08:49 -0500 | [diff] [blame] | 1111 | #if defined(CONFIG_DEBUG_FS) |
Ben Gamari | 27c202a | 2009-07-01 22:26:52 -0400 | [diff] [blame] | 1112 | .debugfs_init = i915_debugfs_init, |
| 1113 | .debugfs_cleanup = i915_debugfs_cleanup, |
Ben Gamari | 955b12d | 2009-02-17 20:08:49 -0500 | [diff] [blame] | 1114 | #endif |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1115 | .gem_init_object = i915_gem_init_object, |
| 1116 | .gem_free_object = i915_gem_free_object, |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1117 | .gem_vm_ops = &i915_gem_vm_ops, |
Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 1118 | |
| 1119 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
| 1120 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
| 1121 | .gem_prime_export = i915_gem_prime_export, |
| 1122 | .gem_prime_import = i915_gem_prime_import, |
| 1123 | |
Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 1124 | .dumb_create = i915_gem_dumb_create, |
| 1125 | .dumb_map_offset = i915_gem_mmap_gtt, |
| 1126 | .dumb_destroy = i915_gem_dumb_destroy, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | .ioctls = i915_ioctls, |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 1128 | .fops = &i915_driver_fops, |
Dave Airlie | 22eae94 | 2005-11-10 22:16:34 +1100 | [diff] [blame] | 1129 | .name = DRIVER_NAME, |
| 1130 | .desc = DRIVER_DESC, |
| 1131 | .date = DRIVER_DATE, |
| 1132 | .major = DRIVER_MAJOR, |
| 1133 | .minor = DRIVER_MINOR, |
| 1134 | .patchlevel = DRIVER_PATCHLEVEL, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | }; |
| 1136 | |
Dave Airlie | 8410ea3 | 2010-12-15 03:16:38 +1000 | [diff] [blame] | 1137 | static struct pci_driver i915_pci_driver = { |
| 1138 | .name = DRIVER_NAME, |
| 1139 | .id_table = pciidlist, |
| 1140 | .probe = i915_pci_probe, |
| 1141 | .remove = i915_pci_remove, |
| 1142 | .driver.pm = &i915_pm_ops, |
| 1143 | }; |
| 1144 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | static int __init i915_init(void) |
| 1146 | { |
| 1147 | driver.num_ioctls = i915_max_ioctl; |
Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 1148 | |
| 1149 | /* |
| 1150 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless |
| 1151 | 	 * explicitly disabled with the module parameter. |
| 1152 | * |
| 1153 | * Otherwise, just follow the parameter (defaulting to off). |
| 1154 | * |
| 1155 | * Allow optional vga_text_mode_force boot option to override |
| 1156 | * the default behavior. |
| 1157 | */ |
| 1158 | #if defined(CONFIG_DRM_I915_KMS) |
| 1159 | if (i915_modeset != 0) |
| 1160 | driver.driver_features |= DRIVER_MODESET; |
| 1161 | #endif |
| 1162 | if (i915_modeset == 1) |
| 1163 | driver.driver_features |= DRIVER_MODESET; |
| 1164 | |
| 1165 | #ifdef CONFIG_VGA_CONSOLE |
| 1166 | if (vgacon_text_force() && i915_modeset == -1) |
| 1167 | driver.driver_features &= ~DRIVER_MODESET; |
| 1168 | #endif |
| 1169 | |
Chris Wilson | 3885c6b | 2011-01-23 10:45:14 +0000 | [diff] [blame] | 1170 | if (!(driver.driver_features & DRIVER_MODESET)) |
| 1171 | driver.get_vblank_timestamp = NULL; |
| 1172 | |
Dave Airlie | 8410ea3 | 2010-12-15 03:16:38 +1000 | [diff] [blame] | 1173 | return drm_pci_init(&driver, &i915_pci_driver); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1174 | } |
| 1175 | |
| 1176 | static void __exit i915_exit(void) |
| 1177 | { |
Dave Airlie | 8410ea3 | 2010-12-15 03:16:38 +1000 | [diff] [blame] | 1178 | drm_pci_exit(&driver, &i915_pci_driver); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | } |
| 1180 | |
| 1181 | module_init(i915_init); |
| 1182 | module_exit(i915_exit); |
| 1183 | |
Dave Airlie | b5e89ed | 2005-09-25 14:28:13 +1000 | [diff] [blame] | 1184 | MODULE_AUTHOR(DRIVER_AUTHOR); |
| 1185 | MODULE_DESCRIPTION(DRIVER_DESC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1186 | MODULE_LICENSE("GPL and additional rights"); |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1187 | |
Jesse Barnes | b7d8409 | 2012-03-22 14:38:43 -0700 | [diff] [blame] | 1188 | /* We give fast paths for the really cool registers */ |
| 1189 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
Daniel Vetter | b7884eb | 2012-06-04 11:18:15 +0200 | [diff] [blame] | 1190 | ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ |
| 1191 | ((reg) < 0x40000) && \ |
| 1192 | ((reg) != FORCEWAKE)) |
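
/*
 * The NEEDS_FORCE_WAKE() check above is assumed to encode three things:
 * the platform must have forcewake at all (HAS_FORCE_WAKE), the offset
 * must fall in the low GT register range (below 0x40000, i.e. not a
 * display/PCH register outside the GT power well), and the FORCEWAKE
 * register itself is excluded since it must stay writable without first
 * taking forcewake.
 */
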
Daniel Vetter | a8b1397 | 2012-10-18 14:16:09 +0200 | [diff] [blame] | 1193 | static void |
| 1194 | ilk_dummy_write(struct drm_i915_private *dev_priv) |
| 1195 | { |
Damien Lespiau | ecdb4eb7 | 2013-05-03 18:48:10 +0100 | [diff] [blame] | 1196 | 	/* WaIssueDummyWriteToWakeupFromRC6:ilk - Issue a dummy write to wake up |
| 1197 | * the chip from rc6 before touching it for real. MI_MODE is masked, |
| 1198 | * hence harmless to write 0 into. */ |
Daniel Vetter | a8b1397 | 2012-10-18 14:16:09 +0200 | [diff] [blame] | 1199 | I915_WRITE_NOTRACE(MI_MODE, 0); |
| 1200 | } |
| 1201 | |
Paulo Zanoni | 115bc2d | 2013-02-18 19:00:20 -0300 | [diff] [blame] | 1202 | static void |
| 1203 | hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) |
| 1204 | { |
Damien Lespiau | e76ebff | 2013-04-22 18:40:40 +0100 | [diff] [blame] | 1205 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && |
Paulo Zanoni | 3f1e109 | 2013-02-18 19:00:21 -0300 | [diff] [blame] | 1206 | (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { |
Paulo Zanoni | 115bc2d | 2013-02-18 19:00:20 -0300 | [diff] [blame] | 1207 | DRM_ERROR("Unknown unclaimed register before writing to %x\n", |
| 1208 | reg); |
Paulo Zanoni | 3f1e109 | 2013-02-18 19:00:21 -0300 | [diff] [blame] | 1209 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
Paulo Zanoni | 115bc2d | 2013-02-18 19:00:20 -0300 | [diff] [blame] | 1210 | } |
| 1211 | } |
| 1212 | |
| 1213 | static void |
| 1214 | hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) |
| 1215 | { |
Damien Lespiau | e76ebff | 2013-04-22 18:40:40 +0100 | [diff] [blame] | 1216 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && |
Paulo Zanoni | 3f1e109 | 2013-02-18 19:00:21 -0300 | [diff] [blame] | 1217 | (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { |
Paulo Zanoni | 115bc2d | 2013-02-18 19:00:20 -0300 | [diff] [blame] | 1218 | DRM_ERROR("Unclaimed write to %x\n", reg); |
Paulo Zanoni | 3f1e109 | 2013-02-18 19:00:21 -0300 | [diff] [blame] | 1219 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
Paulo Zanoni | 115bc2d | 2013-02-18 19:00:20 -0300 | [diff] [blame] | 1220 | } |
| 1221 | } |
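
/*
 * Usage note (inferred from the write macro below): hsw_unclaimed_reg_clear()
 * runs before the MMIO write to flush any stale FPGA_DBG_RM_NOCLAIM
 * indication left by an earlier access, and hsw_unclaimed_reg_check() runs
 * after it, so a set bit at that point can be attributed to the register
 * offset that was just written.
 */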
| 1222 | |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1223 | #define __i915_read(x, y) \ |
| 1224 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
| 1225 | u##x val = 0; \ |
Daniel Vetter | a8b1397 | 2012-10-18 14:16:09 +0200 | [diff] [blame] | 1226 | if (IS_GEN5(dev_priv->dev)) \ |
| 1227 | ilk_dummy_write(dev_priv); \ |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1228 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
Keith Packard | c937504 | 2012-01-06 11:48:38 -0800 | [diff] [blame] | 1229 | unsigned long irqflags; \ |
| 1230 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ |
| 1231 | if (dev_priv->forcewake_count == 0) \ |
Chris Wilson | 990bbda | 2012-07-02 11:51:02 -0300 | [diff] [blame] | 1232 | dev_priv->gt.force_wake_get(dev_priv); \ |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1233 | val = read##y(dev_priv->regs + reg); \ |
Keith Packard | c937504 | 2012-01-06 11:48:38 -0800 | [diff] [blame] | 1234 | if (dev_priv->forcewake_count == 0) \ |
Chris Wilson | 990bbda | 2012-07-02 11:51:02 -0300 | [diff] [blame] | 1235 | dev_priv->gt.force_wake_put(dev_priv); \ |
Keith Packard | c937504 | 2012-01-06 11:48:38 -0800 | [diff] [blame] | 1236 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1237 | } else { \ |
| 1238 | val = read##y(dev_priv->regs + reg); \ |
| 1239 | } \ |
| 1240 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ |
| 1241 | return val; \ |
| 1242 | } |
| 1243 | |
| 1244 | __i915_read(8, b) |
| 1245 | __i915_read(16, w) |
| 1246 | __i915_read(32, l) |
| 1247 | __i915_read(64, q) |
| 1248 | #undef __i915_read |
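
/*
 * Expansion sketch (for reference only): __i915_read(32, l) above produces
 *
 *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
 *
 * which reads via readl(dev_priv->regs + reg), taking forcewake around the
 * access when NEEDS_FORCE_WAKE() says so; the 8/16/64 bit variants use
 * readb/readw/readq in the same way.
 */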
| 1249 | |
| 1250 | #define __i915_write(x, y) \ |
| 1251 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
Ben Widawsky | 67a3744 | 2012-02-09 10:15:20 +0100 | [diff] [blame] | 1252 | u32 __fifo_ret = 0; \ |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1253 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ |
| 1254 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
Ben Widawsky | 67a3744 | 2012-02-09 10:15:20 +0100 | [diff] [blame] | 1255 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1256 | } \ |
Daniel Vetter | a8b1397 | 2012-10-18 14:16:09 +0200 | [diff] [blame] | 1257 | if (IS_GEN5(dev_priv->dev)) \ |
| 1258 | ilk_dummy_write(dev_priv); \ |
Paulo Zanoni | 115bc2d | 2013-02-18 19:00:20 -0300 | [diff] [blame] | 1259 | hsw_unclaimed_reg_clear(dev_priv, reg); \ |
Ville Syrjälä | fe31b57 | 2013-01-25 21:44:47 +0200 | [diff] [blame] | 1260 | write##y(val, dev_priv->regs + reg); \ |
Ben Widawsky | 67a3744 | 2012-02-09 10:15:20 +0100 | [diff] [blame] | 1261 | if (unlikely(__fifo_ret)) { \ |
| 1262 | gen6_gt_check_fifodbg(dev_priv); \ |
| 1263 | } \ |
Paulo Zanoni | 115bc2d | 2013-02-18 19:00:20 -0300 | [diff] [blame] | 1264 | hsw_unclaimed_reg_check(dev_priv, reg); \ |
Andi Kleen | f700088 | 2011-10-13 16:08:51 -0700 | [diff] [blame] | 1265 | } |
| 1266 | __i915_write(8, b) |
| 1267 | __i915_write(16, w) |
| 1268 | __i915_write(32, l) |
| 1269 | __i915_write(64, q) |
| 1270 | #undef __i915_write |
Ben Widawsky | c0c7bab | 2012-07-12 11:01:05 -0700 | [diff] [blame] | 1271 | |
| 1272 | static const struct register_whitelist { |
| 1273 | uint64_t offset; |
| 1274 | uint32_t size; |
| 1275 | 	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ |
| 1276 | } whitelist[] = { |
| 1277 | { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, |
| 1278 | }; |
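
/*
 * Worked example of gen_bitmask (matching the check in i915_reg_read_ioctl()
 * below): 0xF0 has bits 4..7 set, so the RING_TIMESTAMP entry is allowed on
 * gen4 through gen7, since the ioctl tests
 * (1 << INTEL_INFO(dev)->gen) & entry->gen_bitmask.
 */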
| 1279 | |
| 1280 | int i915_reg_read_ioctl(struct drm_device *dev, |
| 1281 | void *data, struct drm_file *file) |
| 1282 | { |
| 1283 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1284 | struct drm_i915_reg_read *reg = data; |
| 1285 | struct register_whitelist const *entry = whitelist; |
| 1286 | int i; |
| 1287 | |
| 1288 | for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { |
| 1289 | if (entry->offset == reg->offset && |
| 1290 | (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) |
| 1291 | break; |
| 1292 | } |
| 1293 | |
| 1294 | if (i == ARRAY_SIZE(whitelist)) |
| 1295 | return -EINVAL; |
| 1296 | |
| 1297 | switch (entry->size) { |
| 1298 | case 8: |
| 1299 | reg->val = I915_READ64(reg->offset); |
| 1300 | break; |
| 1301 | case 4: |
| 1302 | reg->val = I915_READ(reg->offset); |
| 1303 | break; |
| 1304 | case 2: |
| 1305 | reg->val = I915_READ16(reg->offset); |
| 1306 | break; |
| 1307 | case 1: |
| 1308 | reg->val = I915_READ8(reg->offset); |
| 1309 | break; |
| 1310 | default: |
| 1311 | WARN_ON(1); |
| 1312 | return -EINVAL; |
| 1313 | } |
| 1314 | |
| 1315 | return 0; |
| 1316 | } |
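
/*
 * Illustrative userspace sketch, not part of this file: exercising
 * i915_reg_read_ioctl() through libdrm. drmIoctl(), struct drm_i915_reg_read
 * and DRM_IOCTL_I915_REG_READ are assumed to come from the libdrm/uapi
 * headers, and 0x2358 is assumed to be the whitelisted
 * RING_TIMESTAMP(RENDER_RING_BASE) offset.
 *
 *	struct drm_i915_reg_read rr = { .offset = 0x2358 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		printf("render ring timestamp: 0x%llx\n",
 *		       (unsigned long long)rr.val);
 */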