/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)

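/*
 * Illustrative note (not part of the original header): the helpers above map
 * pipe/transcoder enum values onto power domains by simple offsetting, e.g.
 *
 *	POWER_DOMAIN_PIPE(PIPE_B)               == POWER_DOMAIN_PIPE_B
 *	POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) == POWER_DOMAIN_TRANSCODER_EDP
 *
 * The eDP case lines up because TRANSCODER_EDP is 0xF and
 * POWER_DOMAIN_TRANSCODER_EDP is defined as POWER_DOMAIN_TRANSCODER_A + 0xF.
 */
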
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

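/*
 * Minimal usage sketch for the iterator macros above (illustrative only;
 * assumes "dev" and "crtc" pointers are in scope and the DRM debug helpers
 * are available):
 *
 *	enum pipe pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder %p is wired to this crtc\n", encoder);
 */
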
struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by DP and FDI links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

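/*
 * Illustrative call (the numbers are made up for the example): compute the
 * DP M/N values for a 24 bpp mode driven over 4 lanes with a 148500 kHz
 * pixel clock and a 270000 kHz link clock:
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 */
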
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

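/*
 * Sketch (illustrative only) of how display code might invoke the find_dpll()
 * hook documented above, assuming dev_priv->display holds the populated
 * vtable and "limit", "crtc", "target" and "refclk" have already been
 * computed by the caller:
 *
 *	struct dpll clock;
 *
 *	if (!dev_priv->display.find_dpll(limit, crtc, target, refclk,
 *					 NULL, &clock))
 *		DRM_DEBUG_KMS("couldn't find suitable PLL dividers\n");
 */
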
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(has_force_wake) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_bsd_ring) sep \
	func(has_blt_ring) sep \
	func(has_vebox_ring) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

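/*
 * Expansion note (added for clarity, not in the original header): with
 * DEFINE_FLAG and SEP_SEMICOLON plugged in, the DEV_INFO_FOR_EACH_FLAG()
 * x-macro above expands inside struct intel_device_info to a list of one-bit
 * fields, roughly:
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 *
 * Other users of the x-macro can supply their own func/sep pair to generate,
 * for example, debug output from the same flag list.
 */
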
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

typedef uint32_t gen6_gtt_pte_t;

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start;	/* Start offset always 0 for dri2 */
	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

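/*
 * Worked example (added for illustration): with 4 KiB pages, a 2 GiB global
 * GTT gives gtt_total_entries() == (2 GiB >> 12) == 524288 PTEs.
 */
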
struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;

	int (*enable)(struct drm_device *dev);
};

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
		int interval;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

enum no_psr_reason {
	PSR_NO_SOURCE, /* Not supported on platform */
	PSR_NO_SINK, /* Not supported by panel */
	PSR_MODULE_PARAM,
	PSR_CRTC_NOT_ACTIVE,
	PSR_PWR_WELL_ENABLED,
	PSR_NOT_TILED,
	PSR_SPRITE_ENABLED,
	PSR_S3D_ENABLED,
	PSR_INTERLACED_ENABLED,
	PSR_HSW_NOT_DDIA,
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* On vlv we need to manually drop to Vmin with a delayed work. */
	struct delayed_work vlv_work;

	/* The below variables and all the RPS hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 hw_max;

	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	struct drm_device *device;
	spinlock_t lock;
	/* power well enable/disable usage count */
	int count;
	int i915_request;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

struct intel_l3_parity {
	u32 *remap_info;
	struct work_struct error_work;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long last_reset;

Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 997 | /** |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 998 | * State variable and reset counter controlling the reset flow |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 999 | * |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 1000 | * Upper bits are for the reset counter. This counter is used by the |
| 1001 | * wait_seqno code to notice, free of races, that a reset event happened |
| 1002 | * and that it needs to restart the entire ioctl (since the seqno it |
| 1003 | * waited for will most likely never signal). |
| 1004 | * |
| 1005 | * This is important for lock-free wait paths, where no contended lock |
| 1006 | * naturally enforces the correct ordering between the bail-out of the |
| 1007 | * waiter and the gpu reset work code. |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1008 | * |
| 1009 | * Lowest bit controls the reset state machine: Set means a reset is in |
| 1010 | * progress. This state will (presuming we don't have any bugs) decay |
| 1011 | * into either unset (successful reset) or the special WEDGED value (hw |
| 1012 | * terminally sour). All waiters on the reset_queue will be woken when |
| 1013 | * that happens. |
| 1014 | */ |
| 1015 | atomic_t reset_counter; |
| 1016 | |
| 1017 | /** |
| 1018 | * Special values/flags for reset_counter |
| 1019 | * |
| 1020 | * Note that the code relies on |
| 1021 | * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG |
| 1022 | * being true. |
| 1023 | */ |
| 1024 | #define I915_RESET_IN_PROGRESS_FLAG 1 |
| 1025 | #define I915_WEDGED 0xffffffff |
| 1026 | |
| 1027 | /** |
| 1028 | * Waitqueue to signal when the reset has completed. Used by clients |
| 1029 | * that wait for dev_priv->mm.wedged to settle. |
| 1030 | */ |
| 1031 | wait_queue_head_t reset_queue; |
Daniel Vetter | 33196de | 2012-11-14 17:14:05 +0100 | [diff] [blame] | 1032 | |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 1033 | /* For gpu hang simulation. */ |
| 1034 | unsigned int stop_rings; |
| 1035 | }; |
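
As a hedged illustration of the reset_counter scheme documented above (the helper name and its exact shape are assumptions, not the driver's real API; it would sit in a source file that includes this header):

/*
 * Sketch only: a lock-free waiter samples reset_counter before sleeping;
 * after waking it bails out and restarts the ioctl if the counter changed
 * (a reset completed) or the in-progress/wedged bit is set.
 */
static inline bool example_reset_happened(struct i915_gpu_error *error,
					  unsigned int pre_wait_counter)
{
	unsigned int now = atomic_read(&error->reset_counter);

	/* I915_WEDGED also has the in-progress bit set, so this covers both. */
	return now != pre_wait_counter || (now & I915_RESET_IN_PROGRESS_FLAG);
}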
| 1036 | |
Zhang Rui | b8efb17 | 2013-02-05 15:41:53 +0800 | [diff] [blame] | 1037 | enum modeset_restore { |
| 1038 | MODESET_ON_LID_OPEN, |
| 1039 | MODESET_DONE, |
| 1040 | MODESET_SUSPENDED, |
| 1041 | }; |
| 1042 | |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1043 | struct intel_vbt_data { |
| 1044 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
| 1045 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
| 1046 | |
| 1047 | /* Feature bits */ |
| 1048 | unsigned int int_tv_support:1; |
| 1049 | unsigned int lvds_dither:1; |
| 1050 | unsigned int lvds_vbt:1; |
| 1051 | unsigned int int_crt_support:1; |
| 1052 | unsigned int lvds_use_ssc:1; |
| 1053 | unsigned int display_clock_mode:1; |
| 1054 | unsigned int fdi_rx_polarity_inverted:1; |
| 1055 | int lvds_ssc_freq; |
| 1056 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
| 1057 | |
| 1058 | /* eDP */ |
| 1059 | int edp_rate; |
| 1060 | int edp_lanes; |
| 1061 | int edp_preemphasis; |
| 1062 | int edp_vswing; |
| 1063 | bool edp_initialized; |
| 1064 | bool edp_support; |
| 1065 | int edp_bpp; |
| 1066 | struct edp_power_seq edp_pps; |
| 1067 | |
Shobhit Kumar | d17c544 | 2013-08-27 15:12:25 +0300 | [diff] [blame] | 1068 | /* MIPI DSI */ |
| 1069 | struct { |
| 1070 | u16 panel_id; |
| 1071 | } dsi; |
| 1072 | |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1073 | int crt_ddc_pin; |
| 1074 | |
| 1075 | int child_dev_num; |
| 1076 | struct child_device_config *child_dev; |
| 1077 | }; |
| 1078 | |
Ville Syrjälä | 77c122b | 2013-08-06 22:24:04 +0300 | [diff] [blame] | 1079 | enum intel_ddb_partitioning { |
| 1080 | INTEL_DDB_PART_1_2, |
| 1081 | INTEL_DDB_PART_5_6, /* IVB+ */ |
| 1082 | }; |
| 1083 | |
Ville Syrjälä | 1fd527c | 2013-08-06 22:24:05 +0300 | [diff] [blame] | 1084 | struct intel_wm_level { |
| 1085 | bool enable; |
| 1086 | uint32_t pri_val; |
| 1087 | uint32_t spr_val; |
| 1088 | uint32_t cur_val; |
| 1089 | uint32_t fbc_val; |
| 1090 | }; |
| 1091 | |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1092 | /* |
| 1093 | * This struct tracks the state needed for the Package C8+ feature. |
| 1094 | * |
| 1095 | * Package states C8 and deeper are really deep PC states that can only be |
| 1096 | * reached when all the devices on the system allow it, so even if the graphics |
| 1097 | * device allows PC8+, it doesn't mean the system will actually get to these |
| 1098 | * states. |
| 1099 | * |
| 1100 | * Our driver only allows PC8+ when all the outputs are disabled, the power well |
| 1101 | * is disabled and the GPU is idle. When these conditions are met, we manually |
| 1102 | * handle the remaining steps: we disable the interrupts and clocks, and |
| 1103 | * switch the LCPLL refclk to Fclk. |
| 1104 | * |
| 1105 | * When we really reach PC8 or deeper states (not just when we allow it) we lose |
| 1106 | * the state of some registers, so when we come back from PC8+ we need to |
| 1107 | * restore this state. We don't get into PC8+ if we're not in RC6, so we don't |
| 1108 | * need to take care of the registers kept by RC6. |
| 1109 | * |
| 1110 | * The interrupt disabling is part of the requirements. We can only leave the |
| 1111 | * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we |
| 1112 | * can lock the machine. |
| 1113 | * |
| 1114 | * Ideally every piece of our code that needs PC8+ disabled would call |
| 1115 | * hsw_disable_package_c8, which would increment disable_count and prevent the |
| 1116 | * system from reaching PC8+. But we don't have a symmetric way to do this for |
| 1117 | * everything, so we have the requirements_met and gpu_idle variables. When we |
| 1118 | * switch requirements_met or gpu_idle to true we decrease disable_count, and |
| 1119 | * increase it in the opposite case. The requirements_met variable is true when |
| 1120 | * all the CRTCs, encoders and the power well are disabled. The gpu_idle |
| 1121 | * variable is true when the GPU is idle. |
| 1122 | * |
| 1123 | * On top of all this, we only actually enable PC8+ if disable_count |
| 1124 | * stays at zero for a few seconds. This is implemented with the |
| 1125 | * enable_work variable. We do this so we don't enable/disable PC8 dozens of |
| 1126 | * consecutive times when all screens are disabled and some background app |
| 1127 | * queries the state of our connectors, or we have some application constantly |
| 1128 | * waking up to use the GPU. Only after the enable_work function actually |
| 1129 | * enables PC8+ does the "enabled" variable become true, which means it can |
| 1130 | * be false even while disable_count is 0. |
| 1131 | * |
| 1132 | * The irqs_disabled variable becomes true exactly after we disable the IRQs and |
| 1133 | * goes back to false exactly before we reenable the IRQs. We use this variable |
| 1134 | * to check if someone is trying to enable/disable IRQs while they're supposed |
| 1135 | * to be disabled. This shouldn't happen, so we print error messages if it |
| 1136 | * does; in that case we also update the variables inside struct regsave so |
| 1137 | * that when we restore the IRQs they will contain the |
| 1138 | * latest expected values. |
| 1139 | * |
| 1140 | * For more, read "Display Sequences for Package C8" in our documentation. |
| 1141 | */ |
| 1142 | struct i915_package_c8 { |
| 1143 | bool requirements_met; |
| 1144 | bool gpu_idle; |
| 1145 | bool irqs_disabled; |
| 1146 | /* Only true after the delayed work task actually enables it. */ |
| 1147 | bool enabled; |
| 1148 | int disable_count; |
| 1149 | struct mutex lock; |
| 1150 | struct delayed_work enable_work; |
| 1151 | |
| 1152 | struct { |
| 1153 | uint32_t deimr; |
| 1154 | uint32_t sdeimr; |
| 1155 | uint32_t gtimr; |
| 1156 | uint32_t gtier; |
| 1157 | uint32_t gen6_pmimr; |
| 1158 | } regsave; |
| 1159 | }; |
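
A rough sketch (in a source file that includes this header) of the disable_count reference counting described above; the function names are hypothetical and the real paths do considerably more (leaving PC8, saving/restoring the regsave registers, switching LCPLL, and so on):

static void example_disable_package_c8(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->pc8.lock);
	/* Any user that needs PC8+ kept off takes a reference. */
	dev_priv->pc8.disable_count++;
	mutex_unlock(&dev_priv->pc8.lock);
}

static void example_enable_package_c8(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->pc8.lock);
	WARN_ON(dev_priv->pc8.disable_count == 0);
	if (--dev_priv->pc8.disable_count == 0)
		/* Defer the real enable so PC8 isn't toggled dozens of times;
		 * i915_pc8_timeout is the module parameter declared below. */
		schedule_delayed_work(&dev_priv->pc8.enable_work,
				      msecs_to_jiffies(i915_pc8_timeout));
	mutex_unlock(&dev_priv->pc8.lock);
}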
| 1160 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1161 | typedef struct drm_i915_private { |
| 1162 | struct drm_device *dev; |
Chris Wilson | 42dcedd | 2012-11-15 11:32:30 +0000 | [diff] [blame] | 1163 | struct kmem_cache *slab; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1164 | |
| 1165 | const struct intel_device_info *info; |
| 1166 | |
| 1167 | int relative_constants_mode; |
| 1168 | |
| 1169 | void __iomem *regs; |
| 1170 | |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1171 | struct intel_uncore uncore; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1172 | |
| 1173 | struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
| 1174 | |
Daniel Vetter | 28c70f1 | 2012-12-01 13:53:45 +0100 | [diff] [blame] | 1175 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1176 | /** gmbus_mutex protects against concurrent usage of the single hw gmbus |
| 1177 | * controller on different i2c buses. */ |
| 1178 | struct mutex gmbus_mutex; |
| 1179 | |
| 1180 | /** |
| 1181 | * Base address of the gmbus and gpio block. |
| 1182 | */ |
| 1183 | uint32_t gpio_mmio_base; |
| 1184 | |
Daniel Vetter | 28c70f1 | 2012-12-01 13:53:45 +0100 | [diff] [blame] | 1185 | wait_queue_head_t gmbus_wait_queue; |
| 1186 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1187 | struct pci_dev *bridge_dev; |
| 1188 | struct intel_ring_buffer ring[I915_NUM_RINGS]; |
Mika Kuoppala | f72b343 | 2012-12-10 15:41:48 +0200 | [diff] [blame] | 1189 | uint32_t last_seqno, next_seqno; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1190 | |
| 1191 | drm_dma_handle_t *status_page_dmah; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1192 | struct resource mch_res; |
| 1193 | |
| 1194 | atomic_t irq_received; |
| 1195 | |
| 1196 | /* protects the irq masks */ |
| 1197 | spinlock_t irq_lock; |
| 1198 | |
Daniel Vetter | 9ee32fea | 2012-12-01 13:53:48 +0100 | [diff] [blame] | 1199 | /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
| 1200 | struct pm_qos_request pm_qos; |
| 1201 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1202 | /* DPIO indirect register protection */ |
Daniel Vetter | 0915300 | 2012-12-12 14:06:44 +0100 | [diff] [blame] | 1203 | struct mutex dpio_lock; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1204 | |
| 1205 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1206 | u32 irq_mask; |
| 1207 | u32 gt_irq_mask; |
Paulo Zanoni | 605cd25 | 2013-08-06 18:57:15 -0300 | [diff] [blame] | 1208 | u32 pm_irq_mask; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1209 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1210 | struct work_struct hotplug_work; |
Daniel Vetter | 52d7ece | 2012-12-01 21:03:22 +0100 | [diff] [blame] | 1211 | bool enable_hotplug_processing; |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1212 | struct { |
| 1213 | unsigned long hpd_last_jiffies; |
| 1214 | int hpd_cnt; |
| 1215 | enum { |
| 1216 | HPD_ENABLED = 0, |
| 1217 | HPD_DISABLED = 1, |
| 1218 | HPD_MARK_DISABLED = 2 |
| 1219 | } hpd_mark; |
| 1220 | } hpd_stats[HPD_NUM_PINS]; |
Egbert Eich | 142e239 | 2013-04-11 15:57:57 +0200 | [diff] [blame] | 1221 | u32 hpd_event_bits; |
Egbert Eich | ac4c16c | 2013-04-16 13:36:58 +0200 | [diff] [blame] | 1222 | struct timer_list hotplug_reenable_timer; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1223 | |
Jesse Barnes | 7f1f385 | 2013-04-02 11:22:20 -0700 | [diff] [blame] | 1224 | int num_plane; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1225 | |
Ben Widawsky | 5c3fe8b | 2013-06-27 16:30:21 -0700 | [diff] [blame] | 1226 | struct i915_fbc fbc; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1227 | struct intel_opregion opregion; |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1228 | struct intel_vbt_data vbt; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1229 | |
| 1230 | /* overlay */ |
| 1231 | struct intel_overlay *overlay; |
Ville Syrjälä | 2c6602d | 2013-02-08 23:13:35 +0200 | [diff] [blame] | 1232 | unsigned int sprite_scaling_enabled; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1233 | |
Jani Nikula | 31ad8ec | 2013-04-02 15:48:09 +0300 | [diff] [blame] | 1234 | /* backlight */ |
| 1235 | struct { |
| 1236 | int level; |
| 1237 | bool enabled; |
Jani Nikula | 8ba2d18 | 2013-04-12 15:18:37 +0300 | [diff] [blame] | 1238 | spinlock_t lock; /* bl registers and the above bl fields */ |
Jani Nikula | 31ad8ec | 2013-04-02 15:48:09 +0300 | [diff] [blame] | 1239 | struct backlight_device *device; |
| 1240 | } backlight; |
| 1241 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1242 | /* LVDS info */ |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1243 | bool no_aux_handshake; |
| 1244 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1245 | struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
| 1246 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
| 1247 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
| 1248 | |
| 1249 | unsigned int fsb_freq, mem_freq, is_ddr3; |
| 1250 | |
Daniel Vetter | 645416f | 2013-09-02 16:22:25 +0200 | [diff] [blame] | 1251 | /** |
| 1252 | * wq - Driver workqueue for GEM. |
| 1253 | * |
| 1254 | * NOTE: Work items scheduled here are not allowed to grab any modeset |
| 1255 | * locks, for otherwise the flushing done in the pageflip code will |
| 1256 | * result in deadlocks. |
| 1257 | */ |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1258 | struct workqueue_struct *wq; |
| 1259 | |
| 1260 | /* Display functions */ |
| 1261 | struct drm_i915_display_funcs display; |
| 1262 | |
| 1263 | /* PCH chipset type */ |
| 1264 | enum intel_pch pch_type; |
Paulo Zanoni | 17a303e | 2012-11-20 15:12:07 -0200 | [diff] [blame] | 1265 | unsigned short pch_id; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1266 | |
| 1267 | unsigned long quirks; |
| 1268 | |
Zhang Rui | b8efb17 | 2013-02-05 15:41:53 +0800 | [diff] [blame] | 1269 | enum modeset_restore modeset_restore; |
| 1270 | struct mutex modeset_restore_lock; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1271 | |
Ben Widawsky | a7bbbd6 | 2013-07-16 16:50:07 -0700 | [diff] [blame] | 1272 | struct list_head vm_list; /* Global list of all address spaces */ |
Ben Widawsky | 853ba5d | 2013-07-16 16:50:05 -0700 | [diff] [blame] | 1273 | struct i915_gtt gtt; /* VMA representing the global address space */ |
Ben Widawsky | 5d4545a | 2013-01-17 12:45:15 -0800 | [diff] [blame] | 1274 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1275 | struct i915_gem_mm mm; |
Daniel Vetter | 8781342 | 2012-05-02 11:49:32 +0200 | [diff] [blame] | 1276 | |
Daniel Vetter | 8781342 | 2012-05-02 11:49:32 +0200 | [diff] [blame] | 1277 | /* Kernel Modesetting */ |
| 1278 | |
yakui_zhao | 9b9d172 | 2009-05-31 17:17:17 +0800 | [diff] [blame] | 1279 | struct sdvo_device_mapping sdvo_mappings[2]; |
Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 1280 | |
Jesse Barnes | 27f8227 | 2011-09-02 12:54:37 -0700 | [diff] [blame] | 1281 | struct drm_crtc *plane_to_crtc_mapping[3]; |
| 1282 | struct drm_crtc *pipe_to_crtc_mapping[3]; |
Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 1283 | wait_queue_head_t pending_flip_queue; |
| 1284 | |
Daniel Vetter | e72f9fb | 2013-06-05 13:34:06 +0200 | [diff] [blame] | 1285 | int num_shared_dpll; |
| 1286 | struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; |
Paulo Zanoni | 6441ab5 | 2012-10-05 12:05:58 -0300 | [diff] [blame] | 1287 | struct intel_ddi_plls ddi_plls; |
Jesse Barnes | ee7b9f9 | 2012-04-20 17:11:53 +0100 | [diff] [blame] | 1288 | |
Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 1289 | /* Reclocking support */ |
| 1290 | bool render_reclock_avail; |
| 1291 | bool lvds_downclock_avail; |
Zhao Yakui | 18f9ed1 | 2009-11-20 03:24:16 +0000 | [diff] [blame] | 1292 | /* indicates the reduced downclock for LVDS*/ |
| 1293 | int lvds_downclock; |
Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 1294 | u16 orig_clock; |
Jesse Barnes | f97108d | 2010-01-29 11:27:07 -0800 | [diff] [blame] | 1295 | |
Zhenyu Wang | c4804411 | 2009-12-17 14:48:43 +0800 | [diff] [blame] | 1296 | bool mchbar_need_disable; |
Jesse Barnes | f97108d | 2010-01-29 11:27:07 -0800 | [diff] [blame] | 1297 | |
Daniel Vetter | a4da4fa | 2012-11-02 19:55:07 +0100 | [diff] [blame] | 1298 | struct intel_l3_parity l3_parity; |
Daniel Vetter | c6a828d | 2012-08-08 23:35:35 +0200 | [diff] [blame] | 1299 | |
Ben Widawsky | 5912450 | 2013-07-04 11:02:05 -0700 | [diff] [blame] | 1300 | /* Cannot be determined by PCIID. You must always read a register. */ |
| 1301 | size_t ellc_size; |
| 1302 | |
Daniel Vetter | c6a828d | 2012-08-08 23:35:35 +0200 | [diff] [blame] | 1303 | /* gen6+ rps state */ |
Daniel Vetter | c85aa88 | 2012-11-02 19:55:03 +0100 | [diff] [blame] | 1304 | struct intel_gen6_power_mgmt rps; |
Daniel Vetter | c6a828d | 2012-08-08 23:35:35 +0200 | [diff] [blame] | 1305 | |
Daniel Vetter | 20e4d40 | 2012-08-08 23:35:39 +0200 | [diff] [blame] | 1306 | /* ilk-only ips/rps state. Everything in here is protected by the global |
| 1307 | * mchdev_lock in intel_pm.c */ |
Daniel Vetter | c85aa88 | 2012-11-02 19:55:03 +0100 | [diff] [blame] | 1308 | struct intel_ilk_power_mgmt ips; |
Jesse Barnes | b5e50c3 | 2010-02-05 12:42:41 -0800 | [diff] [blame] | 1309 | |
Wang Xingchao | a38911a | 2013-05-30 22:07:11 +0800 | [diff] [blame] | 1310 | /* Haswell power well */ |
| 1311 | struct i915_power_well power_well; |
| 1312 | |
Rodrigo Vivi | 3f51e47 | 2013-07-11 18:45:00 -0300 | [diff] [blame] | 1313 | enum no_psr_reason no_psr_reason; |
| 1314 | |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 1315 | struct i915_gpu_error gpu_error; |
Chris Wilson | ae681d9 | 2010-10-01 14:57:56 +0100 | [diff] [blame] | 1316 | |
Jesse Barnes | c9cddff | 2013-05-08 10:45:13 -0700 | [diff] [blame] | 1317 | struct drm_i915_gem_object *vlv_pctx; |
| 1318 | |
Dave Airlie | 8be48d9 | 2010-03-30 05:34:14 +0000 | [diff] [blame] | 1319 | /* list of fbdev register on this device */ |
| 1320 | struct intel_fbdev *fbdev; |
Chris Wilson | e953fd7 | 2011-02-21 22:23:52 +0000 | [diff] [blame] | 1321 | |
Jesse Barnes | 073f34d | 2012-11-02 11:13:59 -0700 | [diff] [blame] | 1322 | /* |
| 1323 | * The console lock may be contended at resume, but we don't |
| 1324 | * want resume to block on it. |
| 1325 | */ |
| 1326 | struct work_struct console_resume_work; |
| 1327 | |
Chris Wilson | e953fd7 | 2011-02-21 22:23:52 +0000 | [diff] [blame] | 1328 | struct drm_property *broadcast_rgb_property; |
Chris Wilson | 3f43c48 | 2011-05-12 22:17:24 +0100 | [diff] [blame] | 1329 | struct drm_property *force_audio_property; |
Ben Widawsky | e368919 | 2012-05-25 16:56:22 -0700 | [diff] [blame] | 1330 | |
Ben Widawsky | 254f965 | 2012-06-04 14:42:42 -0700 | [diff] [blame] | 1331 | bool hw_contexts_disabled; |
| 1332 | uint32_t hw_context_size; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1333 | |
Damien Lespiau | 3e68320 | 2012-12-11 18:48:29 +0000 | [diff] [blame] | 1334 | u32 fdi_rx_config; |
Paulo Zanoni | 68d18ad | 2012-12-01 12:04:26 -0200 | [diff] [blame] | 1335 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1336 | struct i915_suspend_saved_registers regfile; |
Daniel Vetter | 231f42a | 2012-11-02 19:55:05 +0100 | [diff] [blame] | 1337 | |
Ville Syrjälä | 53615a5 | 2013-08-01 16:18:50 +0300 | [diff] [blame] | 1338 | struct { |
| 1339 | /* |
| 1340 | * Raw watermark latency values: |
| 1341 | * in 0.1us units for WM0, |
| 1342 | * in 0.5us units for WM1+. |
| 1343 | */ |
| 1344 | /* primary */ |
| 1345 | uint16_t pri_latency[5]; |
| 1346 | /* sprite */ |
| 1347 | uint16_t spr_latency[5]; |
| 1348 | /* cursor */ |
| 1349 | uint16_t cur_latency[5]; |
| 1350 | } wm; |
| 1351 | |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1352 | struct i915_package_c8 pc8; |
| 1353 | |
Daniel Vetter | 231f42a | 2012-11-02 19:55:05 +0100 | [diff] [blame] | 1354 | /* Old dri1 support infrastructure, beware the dragons ya fools entering |
| 1355 | * here! */ |
| 1356 | struct i915_dri1_state dri1; |
Daniel Vetter | db1b76c | 2013-07-09 16:51:37 +0200 | [diff] [blame] | 1357 | /* Old ums support infrastructure, same warning applies. */ |
| 1358 | struct i915_ums_state ums; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | } drm_i915_private_t; |
| 1360 | |
Chris Wilson | 2c1792a | 2013-08-01 18:39:55 +0100 | [diff] [blame] | 1361 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) |
| 1362 | { |
| 1363 | return dev->dev_private; |
| 1364 | } |
| 1365 | |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 1366 | /* Iterate over initialised rings */ |
| 1367 | #define for_each_ring(ring__, dev_priv__, i__) \ |
| 1368 | for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ |
| 1369 | if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) |
| 1370 | |
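A small usage sketch for the iterator above (the function itself is illustrative only):

static inline int example_count_initialised_rings(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i, count = 0;

	/* Only rings that intel_ring_initialized() accepts are visited. */
	for_each_ring(ring, dev_priv, i)
		count++;

	return count;
}
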
Wu Fengguang | b1d7e4b | 2012-02-14 11:45:36 +0800 | [diff] [blame] | 1371 | enum hdmi_force_audio { |
| 1372 | HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ |
| 1373 | HDMI_AUDIO_OFF, /* force turn off HDMI audio */ |
| 1374 | HDMI_AUDIO_AUTO, /* trust EDID */ |
| 1375 | HDMI_AUDIO_ON, /* force turn on HDMI audio */ |
| 1376 | }; |
| 1377 | |
Daniel Vetter | 190d6cd | 2013-07-04 13:06:28 +0200 | [diff] [blame] | 1378 | #define I915_GTT_OFFSET_NONE ((u32)-1) |
Chris Wilson | ed2f345 | 2012-11-15 11:32:19 +0000 | [diff] [blame] | 1379 | |
Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 1380 | struct drm_i915_gem_object_ops { |
| 1381 | /* Interface between the GEM object and its backing storage. |
| 1382 | * get_pages() is called once prior to the use of the associated set |
| 1383 | * of pages, before binding them into the GTT, and put_pages() is |
| 1384 | * called after we no longer need them. As we expect there to be |
| 1385 | * associated cost with migrating pages between the backing storage |
| 1386 | * and making them available for the GPU (e.g. clflush), we may hold |
| 1387 | * onto the pages after they are no longer referenced by the GPU |
| 1388 | * in case they may be used again shortly (for example migrating the |
| 1389 | * pages to a different memory domain within the GTT). put_pages() |
| 1390 | * will therefore most likely be called when the object itself is |
| 1391 | * being released or under memory pressure (where we attempt to |
| 1392 | * reap pages for the shrinker). |
| 1393 | */ |
| 1394 | int (*get_pages)(struct drm_i915_gem_object *); |
| 1395 | void (*put_pages)(struct drm_i915_gem_object *); |
| 1396 | }; |
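
To make the contract concrete, here is a hypothetical backend (not one of the driver's real shmem/stolen/dma-buf backends, which live in the i915_gem*.c files) wired up through this ops table; it would sit in a source file that includes this header, and the page-population step is deliberately left as a comment:

static int example_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	ret = sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}

	/* A real backend fills the table with its backing pages here. */
	obj->pages = st;
	return 0;
}

static void example_put_pages(struct drm_i915_gem_object *obj)
{
	sg_free_table(obj->pages);
	kfree(obj->pages);
	obj->pages = NULL;
}

static const struct drm_i915_gem_object_ops example_gem_object_ops = {
	.get_pages = example_get_pages,
	.put_pages = example_put_pages,
};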
| 1397 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1398 | struct drm_i915_gem_object { |
Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 1399 | struct drm_gem_object base; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1400 | |
Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 1401 | const struct drm_i915_gem_object_ops *ops; |
| 1402 | |
Ben Widawsky | 2f63315 | 2013-07-17 12:19:03 -0700 | [diff] [blame] | 1403 | /** List of VMAs backed by this object */ |
| 1404 | struct list_head vma_list; |
| 1405 | |
Chris Wilson | c1ad11f | 2012-11-15 11:32:21 +0000 | [diff] [blame] | 1406 | /** Stolen memory for this object, instead of being backed by shmem. */ |
| 1407 | struct drm_mm_node *stolen; |
Ben Widawsky | 35c20a6 | 2013-05-31 11:28:48 -0700 | [diff] [blame] | 1408 | struct list_head global_list; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1409 | |
Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1410 | struct list_head ring_list; |
Ben Widawsky | b25cb2f | 2013-08-14 11:38:33 +0200 | [diff] [blame] | 1411 | /** Used in execbuf to temporarily hold a ref */ |
| 1412 | struct list_head obj_exec_link; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1413 | |
| 1414 | /** |
Chris Wilson | 65ce302 | 2012-07-20 12:41:02 +0100 | [diff] [blame] | 1415 | * This is set if the object is on the active lists (has pending |
| 1416 | * rendering and so a non-zero seqno), and is not set if it is on |
| 1417 | * the inactive (ready to be unbound) list. |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1418 | */ |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1419 | unsigned int active:1; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1420 | |
| 1421 | /** |
| 1422 | * This is set if the object has been written to since last bound |
| 1423 | * to the GTT |
| 1424 | */ |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1425 | unsigned int dirty:1; |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1426 | |
| 1427 | /** |
| 1428 | * Fence register bits (if any) for this object. Will be set |
| 1429 | * as needed when mapped into the GTT. |
| 1430 | * Protected by dev->struct_mutex. |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1431 | */ |
Daniel Vetter | 4b9de73 | 2011-10-09 21:52:02 +0200 | [diff] [blame] | 1432 | signed int fence_reg:I915_MAX_NUM_FENCE_BITS; |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1433 | |
| 1434 | /** |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1435 | * Advice: are the backing pages purgeable? |
| 1436 | */ |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1437 | unsigned int madv:2; |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1438 | |
| 1439 | /** |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1440 | * Current tiling mode for the object. |
| 1441 | */ |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1442 | unsigned int tiling_mode:2; |
Chris Wilson | 5d82e3e | 2012-04-21 16:23:23 +0100 | [diff] [blame] | 1443 | /** |
| 1444 | * Whether the tiling parameters for the currently associated fence |
| 1445 | * register have changed. Note that for the purposes of tracking |
| 1446 | * tiling changes we also treat the unfenced register, the register |
| 1447 | * slot that the object occupies whilst it executes a fenced |
| 1448 | * command (such as BLT on gen2/3), as a "fence". |
| 1449 | */ |
| 1450 | unsigned int fence_dirty:1; |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1451 | |
| 1452 | /** How many users have pinned this object in GTT space. The following |
| 1453 | * users can each hold at most one reference: pwrite/pread, pin_ioctl |
| 1454 | * (via user_pin_count), execbuffer (objects are not allowed multiple |
| 1455 | * times for the same batchbuffer), and the framebuffer code. When |
| 1456 | * switching/pageflipping, the framebuffer code has at most two buffers |
| 1457 | * pinned per crtc. |
| 1458 | * |
| 1459 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 |
| 1460 | * bits with absolutely no headroom. So use 4 bits. */ |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1461 | unsigned int pin_count:4; |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1462 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1463 | |
Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1464 | /** |
Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame] | 1465 | * Is the object at the current location in the gtt mappable and |
| 1466 | * fenceable? Used to avoid costly recalculations. |
| 1467 | */ |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1468 | unsigned int map_and_fenceable:1; |
Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame] | 1469 | |
| 1470 | /** |
Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1471 | * Whether the current gtt mapping needs to be mappable (and isn't just |
| 1472 | * mappable by accident). Track pin and fault separately for a more |
| 1473 | * accurate mappable working set. |
| 1474 | */ |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1475 | unsigned int fault_mappable:1; |
| 1476 | unsigned int pin_mappable:1; |
Chris Wilson | cc98b41 | 2013-08-09 12:25:09 +0100 | [diff] [blame] | 1477 | unsigned int pin_display:1; |
Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1478 | |
Chris Wilson | caea747 | 2010-11-12 13:53:37 +0000 | [diff] [blame] | 1479 | /* |
| 1480 | * Is the GPU currently using a fence to access this buffer? |
| 1481 | */ |
| 1482 | unsigned int pending_fenced_gpu_access:1; |
| 1483 | unsigned int fenced_gpu_access:1; |
| 1484 | |
Chris Wilson | 651d794 | 2013-08-08 14:41:10 +0100 | [diff] [blame] | 1485 | unsigned int cache_level:3; |
Chris Wilson | 93dfb40 | 2011-03-29 16:59:50 -0700 | [diff] [blame] | 1486 | |
Daniel Vetter | 7bddb01 | 2012-02-09 17:15:47 +0100 | [diff] [blame] | 1487 | unsigned int has_aliasing_ppgtt_mapping:1; |
Daniel Vetter | 74898d7 | 2012-02-15 23:50:22 +0100 | [diff] [blame] | 1488 | unsigned int has_global_gtt_mapping:1; |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 1489 | unsigned int has_dma_mapping:1; |
Daniel Vetter | 7bddb01 | 2012-02-09 17:15:47 +0100 | [diff] [blame] | 1490 | |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 1491 | struct sg_table *pages; |
Chris Wilson | a557017 | 2012-09-04 21:02:54 +0100 | [diff] [blame] | 1492 | int pages_pin_count; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1493 | |
Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 1494 | /* prime dma-buf support */ |
Dave Airlie | 9a70cc2 | 2012-05-22 13:09:21 +0100 | [diff] [blame] | 1495 | void *dma_buf_vmapping; |
| 1496 | int vmapping_count; |
| 1497 | |
Chris Wilson | caea747 | 2010-11-12 13:53:37 +0000 | [diff] [blame] | 1498 | struct intel_ring_buffer *ring; |
| 1499 | |
Chris Wilson | 1c293ea | 2012-04-17 15:31:27 +0100 | [diff] [blame] | 1500 | /** Breadcrumb of last rendering to the buffer. */ |
Chris Wilson | 0201f1e | 2012-07-20 12:41:01 +0100 | [diff] [blame] | 1501 | uint32_t last_read_seqno; |
| 1502 | uint32_t last_write_seqno; |
Chris Wilson | caea747 | 2010-11-12 13:53:37 +0000 | [diff] [blame] | 1503 | /** Breadcrumb of last fenced GPU access to the buffer. */ |
| 1504 | uint32_t last_fenced_seqno; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1505 | |
Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 1506 | /** Current tiling stride for the object, if it's tiled. */ |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1507 | uint32_t stride; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1508 | |
Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 1509 | /** Record of address bit 17 of each page at last unbind. */ |
Chris Wilson | d312ec2 | 2010-06-06 15:40:22 +0100 | [diff] [blame] | 1510 | unsigned long *bit_17; |
Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 1511 | |
Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 1512 | /** User space pin count and filp owning the pin */ |
| 1513 | uint32_t user_pin_count; |
| 1514 | struct drm_file *pin_filp; |
Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1515 | |
| 1516 | /** for phy allocated objects */ |
| 1517 | struct drm_i915_gem_phys_object *phys_obj; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1518 | }; |
Daniel Vetter | b45305f | 2012-12-17 16:21:27 +0100 | [diff] [blame] | 1519 | #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1520 | |
Daniel Vetter | 62b8b21 | 2010-04-09 19:05:08 +0000 | [diff] [blame] | 1521 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1522 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1523 | /** |
| 1524 | * Request queue structure. |
| 1525 | * |
| 1526 | * The request queue allows us to note sequence numbers that have been emitted |
| 1527 | * and may be associated with active buffers to be retired. |
| 1528 | * |
| 1529 | * By keeping this list, we can avoid having to do questionable |
| 1530 | * sequence-number comparisons on buffer last_rendering_seqnos, and associate |
| 1531 | * an emission time with seqnos for tracking how far ahead of the GPU we are. |
| 1532 | */ |
| 1533 | struct drm_i915_gem_request { |
Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1534 | /** On which ring this request was generated */ |
| 1535 | struct intel_ring_buffer *ring; |
| 1536 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1537 | /** GEM sequence number associated with this request. */ |
| 1538 | uint32_t seqno; |
| 1539 | |
Mika Kuoppala | 7d736f4 | 2013-06-12 15:01:39 +0300 | [diff] [blame] | 1540 | /** Position in the ringbuffer of the start of the request */ |
| 1541 | u32 head; |
| 1542 | |
| 1543 | /** Position in the ringbuffer of the end of the request */ |
Chris Wilson | a71d8d9 | 2012-02-15 11:25:36 +0000 | [diff] [blame] | 1544 | u32 tail; |
| 1545 | |
Mika Kuoppala | 0e50e96 | 2013-05-02 16:48:08 +0300 | [diff] [blame] | 1546 | /** Context related to this request */ |
| 1547 | struct i915_hw_context *ctx; |
| 1548 | |
Mika Kuoppala | 7d736f4 | 2013-06-12 15:01:39 +0300 | [diff] [blame] | 1549 | /** Batch buffer related to this request if any */ |
| 1550 | struct drm_i915_gem_object *batch_obj; |
| 1551 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1552 | /** Time at which this request was emitted, in jiffies. */ |
| 1553 | unsigned long emitted_jiffies; |
| 1554 | |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1555 | /** global list entry for this request */ |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1556 | struct list_head list; |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1557 | |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1558 | struct drm_i915_file_private *file_priv; |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1559 | /** file_priv list entry for this request */ |
| 1560 | struct list_head client_list; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1561 | }; |
| 1562 | |
| 1563 | struct drm_i915_file_private { |
| 1564 | struct { |
Luis R. Rodriguez | 99057c8 | 2012-11-29 12:45:06 -0800 | [diff] [blame] | 1565 | spinlock_t lock; |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1566 | struct list_head request_list; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1567 | } mm; |
Ben Widawsky | 4052105 | 2012-06-04 14:42:43 -0700 | [diff] [blame] | 1568 | struct idr context_idr; |
Mika Kuoppala | e59ec13 | 2013-06-12 12:35:28 +0300 | [diff] [blame] | 1569 | |
| 1570 | struct i915_ctx_hang_stats hang_stats; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1571 | }; |
| 1572 | |
Chris Wilson | 2c1792a | 2013-08-01 18:39:55 +0100 | [diff] [blame] | 1573 | #define INTEL_INFO(dev) (to_i915(dev)->info) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1574 | |
| 1575 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
| 1576 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
| 1577 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
| 1578 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
| 1579 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
| 1580 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) |
| 1581 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) |
| 1582 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
| 1583 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) |
| 1584 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) |
| 1585 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) |
| 1586 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
| 1587 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) |
| 1588 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) |
| 1589 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
| 1590 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1591 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
Jesse Barnes | 4b65177 | 2011-04-28 14:33:09 -0700 | [diff] [blame] | 1592 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
Jesse Barnes | 8ab4397 | 2012-10-25 12:15:42 -0700 | [diff] [blame] | 1593 | #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ |
| 1594 | (dev)->pci_device == 0x0152 || \ |
| 1595 | (dev)->pci_device == 0x015a) |
Daniel Vetter | 6547fbd | 2012-12-14 23:38:29 +0100 | [diff] [blame] | 1596 | #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ |
| 1597 | (dev)->pci_device == 0x0106 || \ |
| 1598 | (dev)->pci_device == 0x010A) |
Jesse Barnes | 70a3eb7 | 2012-03-28 13:39:21 -0700 | [diff] [blame] | 1599 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
Eugeni Dodonov | 4cae9ae | 2012-03-29 12:32:18 -0300 | [diff] [blame] | 1600 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1601 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
Paulo Zanoni | ed1c9e2 | 2013-08-12 14:34:08 -0300 | [diff] [blame] | 1602 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
| 1603 | ((dev)->pci_device & 0xFF00) == 0x0C00) |
Paulo Zanoni | d567b07 | 2012-11-20 13:27:43 -0200 | [diff] [blame] | 1604 | #define IS_ULT(dev) (IS_HASWELL(dev) && \ |
| 1605 | ((dev)->pci_device & 0xFF00) == 0x0A00) |
Rodrigo Vivi | 9435373 | 2013-08-28 16:45:46 -0300 | [diff] [blame^] | 1606 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ |
| 1607 | ((dev)->pci_device & 0x00F0) == 0x0020) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1608 | |
Jesse Barnes | 8543669 | 2011-04-06 12:11:14 -0700 | [diff] [blame] | 1609 | /* |
| 1610 | * The genX designation typically refers to the render engine, so render |
| 1611 | * capability related checks should use IS_GEN, while display and other checks |
| 1612 | * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular |
| 1613 | * chips, etc.). |
| 1614 | */ |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1615 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) |
| 1616 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) |
| 1617 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) |
| 1618 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) |
| 1619 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) |
Jesse Barnes | 8543669 | 2011-04-06 12:11:14 -0700 | [diff] [blame] | 1620 | #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1621 | |
| 1622 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) |
| 1623 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) |
Xiang, Haihao | f72a118 | 2013-05-28 19:22:22 -0700 | [diff] [blame] | 1624 | #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) |
Eugeni Dodonov | 3d29b84 | 2012-01-17 14:43:53 -0200 | [diff] [blame] | 1625 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) |
Chris Wilson | 651d794 | 2013-08-08 14:41:10 +0100 | [diff] [blame] | 1626 | #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1627 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
| 1628 | |
Ben Widawsky | 254f965 | 2012-06-04 14:42:42 -0700 | [diff] [blame] | 1629 | #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
Jesse Barnes | 9355360 | 2012-06-15 11:55:23 -0700 | [diff] [blame] | 1630 | #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1631 | |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1632 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1633 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
| 1634 | |
Daniel Vetter | b45305f | 2012-12-17 16:21:27 +0100 | [diff] [blame] | 1635 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
| 1636 | #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) |
| 1637 | |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1638 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
| 1639 | * rows, which changed the alignment requirements and fence programming. |
| 1640 | */ |
| 1641 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ |
| 1642 | IS_I915GM(dev))) |
| 1643 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) |
| 1644 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
| 1645 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
| 1646 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) |
| 1647 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) |
| 1648 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1649 | |
| 1650 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) |
| 1651 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
| 1652 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1653 | |
Damien Lespiau | f5adf94 | 2013-06-24 18:29:34 +0100 | [diff] [blame] | 1654 | #define HAS_IPS(dev) (IS_ULT(dev)) |
| 1655 | |
Damien Lespiau | dd93be5 | 2013-04-22 18:40:39 +0100 | [diff] [blame] | 1656 | #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) |
Paulo Zanoni | 86d52df | 2013-03-06 20:03:18 -0300 | [diff] [blame] | 1657 | #define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) |
Damien Lespiau | 30568c4 | 2013-04-22 18:40:41 +0100 | [diff] [blame] | 1658 | #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) |
Paulo Zanoni | affa935 | 2012-11-23 15:30:39 -0200 | [diff] [blame] | 1659 | |
Paulo Zanoni | 17a303e | 2012-11-20 15:12:07 -0200 | [diff] [blame] | 1660 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
| 1661 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
| 1662 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
| 1663 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
| 1664 | #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 |
| 1665 | #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 |
| 1666 | |
Chris Wilson | 2c1792a | 2013-08-01 18:39:55 +0100 | [diff] [blame] | 1667 | #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) |
Eugeni Dodonov | eb877eb | 2012-03-29 12:32:20 -0300 | [diff] [blame] | 1668 | #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1669 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
| 1670 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
Ben Widawsky | 40c7ead | 2013-04-05 13:12:40 -0700 | [diff] [blame] | 1671 | #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) |
Paulo Zanoni | 45e6e3a | 2012-07-03 15:57:32 -0300 | [diff] [blame] | 1672 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 1673 | |
Daniel Vetter | b7884eb | 2012-06-04 11:18:15 +0200 | [diff] [blame] | 1674 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) |
| 1675 | |
Ben Widawsky | f27b926 | 2012-07-24 20:47:32 -0700 | [diff] [blame] | 1676 | #define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
Ben Widawsky | e1ef7cc | 2012-07-24 20:47:31 -0700 | [diff] [blame] | 1677 | |
Ben Widawsky | c8735b0 | 2012-09-07 19:43:39 -0700 | [diff] [blame] | 1678 | #define GT_FREQUENCY_MULTIPLIER 50 |
| 1679 | |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1680 | #include "i915_trace.h" |
| 1681 | |
Eugeni Dodonov | 83b7f9a | 2012-03-23 11:57:18 -0300 | [diff] [blame] | 1682 | /** |
| 1683 | * RC6 is a special power stage which allows the GPU to enter a very |
| 1684 | * low-voltage mode when idle, using down to 0V while at this stage. This |
| 1685 | * stage is entered automatically when the GPU is idle and RC6 support is |
| 1686 | * enabled; the GPU wakes up automatically as soon as new work arrives. |
| 1687 | * |
| 1688 | * There are different RC6 modes available in Intel GPUs, which differ from |
| 1689 | * each other in the latency required to enter and leave RC6 and in the |
| 1690 | * voltage consumed by the GPU in different states. |
| 1691 | * |
| 1692 | * The combination of the following flags defines which states the GPU is |
| 1693 | * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and |
| 1694 | * RC6pp is the deepest RC6. Hardware support for them varies according to the |
| 1695 | * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one |
| 1696 | * which brings the most power savings; deeper states save more power, but |
| 1697 | * require higher latency to switch to and wake up. |
| 1698 | */ |
| 1699 | #define INTEL_RC6_ENABLE (1<<0) |
| 1700 | #define INTEL_RC6p_ENABLE (1<<1) |
| 1701 | #define INTEL_RC6pp_ENABLE (1<<2) |
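
As an illustrative example only, a policy mask built from the flags above that allows RC6 and deep RC6 but not RC6pp (e.g. for the i915_enable_rc6 module parameter declared further down) could look like:

/* Illustrative: allow RC6 and RC6p, but not RC6pp. */
#define EXAMPLE_RC6_MODES	(INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE)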
| 1702 | |
Rob Clark | baa7094 | 2013-08-02 13:27:49 -0400 | [diff] [blame] | 1703 | extern const struct drm_ioctl_desc i915_ioctls[]; |
Dave Airlie | b3a8363 | 2005-09-30 18:37:36 +1000 | [diff] [blame] | 1704 | extern int i915_max_ioctl; |
Ben Widawsky | a35d9d3 | 2011-07-13 14:38:17 -0700 | [diff] [blame] | 1705 | extern unsigned int i915_fbpercrtc __always_unused; |
| 1706 | extern int i915_panel_ignore_lid __read_mostly; |
| 1707 | extern unsigned int i915_powersave __read_mostly; |
Eugeni Dodonov | f45b555 | 2011-12-09 17:16:37 -0800 | [diff] [blame] | 1708 | extern int i915_semaphores __read_mostly; |
Ben Widawsky | a35d9d3 | 2011-07-13 14:38:17 -0700 | [diff] [blame] | 1709 | extern unsigned int i915_lvds_downclock __read_mostly; |
Takashi Iwai | 121d527 | 2012-03-20 13:07:06 +0100 | [diff] [blame] | 1710 | extern int i915_lvds_channel_mode __read_mostly; |
Keith Packard | 4415e63 | 2011-11-09 09:57:50 -0800 | [diff] [blame] | 1711 | extern int i915_panel_use_ssc __read_mostly; |
Ben Widawsky | a35d9d3 | 2011-07-13 14:38:17 -0700 | [diff] [blame] | 1712 | extern int i915_vbt_sdvo_panel_type __read_mostly; |
Keith Packard | c0f372b3 | 2011-11-16 22:24:52 -0800 | [diff] [blame] | 1713 | extern int i915_enable_rc6 __read_mostly; |
Keith Packard | 4415e63 | 2011-11-09 09:57:50 -0800 | [diff] [blame] | 1714 | extern int i915_enable_fbc __read_mostly; |
Ben Widawsky | a35d9d3 | 2011-07-13 14:38:17 -0700 | [diff] [blame] | 1715 | extern bool i915_enable_hangcheck __read_mostly; |
Daniel Vetter | 650dc07 | 2012-04-02 10:08:35 +0200 | [diff] [blame] | 1716 | extern int i915_enable_ppgtt __read_mostly; |
Rodrigo Vivi | 105b7c1 | 2013-07-11 18:45:02 -0300 | [diff] [blame] | 1717 | extern int i915_enable_psr __read_mostly; |
Rodrigo Vivi | 0a3af26 | 2012-10-15 17:16:23 -0300 | [diff] [blame] | 1718 | extern unsigned int i915_preliminary_hw_support __read_mostly; |
Paulo Zanoni | 2124b72 | 2013-03-22 14:07:23 -0300 | [diff] [blame] | 1719 | extern int i915_disable_power_well __read_mostly; |
Paulo Zanoni | 3c4ca58 | 2013-05-31 16:33:23 -0300 | [diff] [blame] | 1720 | extern int i915_enable_ips __read_mostly; |
Jesse Barnes | 2385bdf | 2013-06-26 01:38:15 +0300 | [diff] [blame] | 1721 | extern bool i915_fastboot __read_mostly; |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1722 | extern int i915_enable_pc8 __read_mostly; |
Paulo Zanoni | 9005874 | 2013-08-19 13:18:11 -0300 | [diff] [blame] | 1723 | extern int i915_pc8_timeout __read_mostly; |
Xiong Zhang | 0b74b50 | 2013-07-19 13:51:24 +0800 | [diff] [blame] | 1724 | extern bool i915_prefault_disable __read_mostly; |
Dave Airlie | b3a8363 | 2005-09-30 18:37:36 +1000 | [diff] [blame] | 1725 | |
Dave Airlie | 6a9ee8a | 2010-02-01 15:38:10 +1000 | [diff] [blame] | 1726 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
| 1727 | extern int i915_resume(struct drm_device *dev); |
Dave Airlie | 7c1c287 | 2008-11-28 14:22:24 +1000 | [diff] [blame] | 1728 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
| 1729 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
| 1730 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1731 | /* i915_dma.c */ |
Daniel Vetter | d05c617 | 2012-04-26 23:28:09 +0200 | [diff] [blame] | 1732 | void i915_update_dri1_breadcrumb(struct drm_device *dev); |
Dave Airlie | 84b1fd1 | 2007-07-11 15:53:27 +1000 | [diff] [blame] | 1733 | extern void i915_kernel_lost_context(struct drm_device * dev); |
Dave Airlie | 22eae94 | 2005-11-10 22:16:34 +1100 | [diff] [blame] | 1734 | extern int i915_driver_load(struct drm_device *, unsigned long flags); |
Jesse Barnes | ba8bbcf | 2007-11-22 14:14:14 +1000 | [diff] [blame] | 1735 | extern int i915_driver_unload(struct drm_device *); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1736 | extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); |
Dave Airlie | 84b1fd1 | 2007-07-11 15:53:27 +1000 | [diff] [blame] | 1737 | extern void i915_driver_lastclose(struct drm_device * dev); |
Eric Anholt | 6c340ea | 2007-08-25 20:23:09 +1000 | [diff] [blame] | 1738 | extern void i915_driver_preclose(struct drm_device *dev, |
| 1739 | struct drm_file *file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1740 | extern void i915_driver_postclose(struct drm_device *dev, |
| 1741 | struct drm_file *file_priv); |
Dave Airlie | 84b1fd1 | 2007-07-11 15:53:27 +1000 | [diff] [blame] | 1742 | extern int i915_driver_device_is_agp(struct drm_device * dev); |
Ben Widawsky | c43b563 | 2012-04-16 14:07:40 -0700 | [diff] [blame] | 1743 | #ifdef CONFIG_COMPAT |
Dave Airlie | 0d6aa60 | 2006-01-02 20:14:23 +1100 | [diff] [blame] | 1744 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
| 1745 | unsigned long arg); |
Ben Widawsky | c43b563 | 2012-04-16 14:07:40 -0700 | [diff] [blame] | 1746 | #endif |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1747 | extern int i915_emit_box(struct drm_device *dev, |
Chris Wilson | c4e7a41 | 2010-11-30 14:10:25 +0000 | [diff] [blame] | 1748 | struct drm_clip_rect *box, |
| 1749 | int DR1, int DR4); |
Ben Widawsky | 8e96d9c | 2012-06-04 14:42:56 -0700 | [diff] [blame] | 1750 | extern int intel_gpu_reset(struct drm_device *dev); |
Daniel Vetter | d4b8bb2 | 2012-04-27 15:17:44 +0200 | [diff] [blame] | 1751 | extern int i915_reset(struct drm_device *dev); |
Jesse Barnes | 7648fa9 | 2010-05-20 14:28:11 -0700 | [diff] [blame] | 1752 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
| 1753 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
| 1754 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
| 1755 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
| 1756 | |
Jesse Barnes | 073f34d | 2012-11-02 11:13:59 -0700 | [diff] [blame] | 1757 | extern void intel_console_resume(struct work_struct *work); |
Dave Airlie | af6061a | 2008-05-07 12:15:39 +1000 | [diff] [blame] | 1758 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | /* i915_irq.c */ |
Mika Kuoppala | 10cd45b | 2013-07-03 17:22:08 +0300 | [diff] [blame] | 1760 | void i915_queue_hangcheck(struct drm_device *dev); |
Chris Wilson | 527f9e9 | 2010-11-11 01:16:58 +0000 | [diff] [blame] | 1761 | void i915_handle_error(struct drm_device *dev, bool wedged); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 1763 | extern void intel_irq_init(struct drm_device *dev); |
Ben Widawsky | e1b4d30 | 2013-07-30 16:27:57 -0700 | [diff] [blame] | 1764 | extern void intel_pm_init(struct drm_device *dev); |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 1765 | extern void intel_hpd_init(struct drm_device *dev); |
| 1767 | |
| 1768 | extern void intel_uncore_sanitize(struct drm_device *dev); |
| 1769 | extern void intel_uncore_early_sanitize(struct drm_device *dev); |
| 1770 | extern void intel_uncore_init(struct drm_device *dev); |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1771 | extern void intel_uncore_clear_errors(struct drm_device *dev); |
| 1772 | extern void intel_uncore_check_errors(struct drm_device *dev); |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1773 | |
Keith Packard | 7c46358 | 2008-11-04 02:03:27 -0800 | [diff] [blame] | 1774 | void |
| 1775 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
| 1776 | |
| 1777 | void |
| 1778 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
| 1779 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1780 | /* i915_gem.c */ |
| 1781 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
| 1782 | struct drm_file *file_priv); |
| 1783 | int i915_gem_create_ioctl(struct drm_device *dev, void *data, |
| 1784 | struct drm_file *file_priv); |
| 1785 | int i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
| 1786 | struct drm_file *file_priv); |
| 1787 | int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
| 1788 | struct drm_file *file_priv); |
| 1789 | int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
| 1790 | struct drm_file *file_priv); |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1791 | int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
| 1792 | struct drm_file *file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1793 | int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
| 1794 | struct drm_file *file_priv); |
| 1795 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
| 1796 | struct drm_file *file_priv); |
| 1797 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
| 1798 | struct drm_file *file_priv); |
Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 1799 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
| 1800 | struct drm_file *file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1801 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
| 1802 | struct drm_file *file_priv); |
| 1803 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
| 1804 | struct drm_file *file_priv); |
| 1805 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
| 1806 | struct drm_file *file_priv); |
Ben Widawsky | 199adf4 | 2012-09-21 17:01:20 -0700 | [diff] [blame] | 1807 | int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
| 1808 | struct drm_file *file); |
| 1809 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
| 1810 | struct drm_file *file); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1811 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
| 1812 | struct drm_file *file_priv); |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1813 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
| 1814 | struct drm_file *file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1815 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
| 1816 | struct drm_file *file_priv); |
| 1817 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
| 1818 | struct drm_file *file_priv); |
| 1819 | int i915_gem_set_tiling(struct drm_device *dev, void *data, |
| 1820 | struct drm_file *file_priv); |
| 1821 | int i915_gem_get_tiling(struct drm_device *dev, void *data, |
| 1822 | struct drm_file *file_priv); |
Eric Anholt | 5a125c3 | 2008-10-22 21:40:13 -0700 | [diff] [blame] | 1823 | int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
| 1824 | struct drm_file *file_priv); |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1825 | int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
| 1826 | struct drm_file *file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1827 | void i915_gem_load(struct drm_device *dev); |
Chris Wilson | 42dcedd | 2012-11-15 11:32:30 +0000 | [diff] [blame] | 1828 | void *i915_gem_object_alloc(struct drm_device *dev); |
| 1829 | void i915_gem_object_free(struct drm_i915_gem_object *obj); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1830 | int i915_gem_init_object(struct drm_gem_object *obj); |
Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 1831 | void i915_gem_object_init(struct drm_i915_gem_object *obj, |
| 1832 | const struct drm_i915_gem_object_ops *ops); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1833 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
| 1834 | size_t size); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1835 | void i915_gem_free_object(struct drm_gem_object *obj); |
Ben Widawsky | 2f63315 | 2013-07-17 12:19:03 -0700 | [diff] [blame] | 1836 | void i915_gem_vma_destroy(struct i915_vma *vma); |
Chris Wilson | 42dcedd | 2012-11-15 11:32:30 +0000 | [diff] [blame] | 1837 | |
Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 1838 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
Ben Widawsky | c37e220 | 2013-07-31 16:59:58 -0700 | [diff] [blame] | 1839 | struct i915_address_space *vm, |
Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 1840 | uint32_t alignment, |
Chris Wilson | 86a1ee2 | 2012-08-11 15:41:04 +0100 | [diff] [blame] | 1841 | bool map_and_fenceable, |
| 1842 | bool nonblocking); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1843 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
Ben Widawsky | 07fe0b1 | 2013-07-31 17:00:10 -0700 | [diff] [blame] | 1844 | int __must_check i915_vma_unbind(struct i915_vma *vma); |
| 1845 | int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); |
Chris Wilson | dd624af | 2013-01-15 12:39:35 +0000 | [diff] [blame] | 1846 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1847 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1848 | void i915_gem_lastclose(struct drm_device *dev); |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1849 | |
Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 1850 | int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 1851 | static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
| 1852 | { |
Imre Deak | 67d5a50 | 2013-02-18 19:28:02 +0200 | [diff] [blame] | 1853 | struct sg_page_iter sg_iter; |
Chris Wilson | 1cf8378 | 2012-10-10 12:11:52 +0100 | [diff] [blame] | 1854 | |
Imre Deak | 67d5a50 | 2013-02-18 19:28:02 +0200 | [diff] [blame] | 1855 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n) |
Imre Deak | 2db76d7 | 2013-03-26 15:14:18 +0200 | [diff] [blame] | 1856 | return sg_page_iter_page(&sg_iter); |
Imre Deak | 67d5a50 | 2013-02-18 19:28:02 +0200 | [diff] [blame] | 1857 | |
| 1858 | return NULL; |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 1859 | } |
Chris Wilson | a557017 | 2012-09-04 21:02:54 +0100 | [diff] [blame] | 1860 | static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) |
| 1861 | { |
| 1862 | BUG_ON(obj->pages == NULL); |
| 1863 | obj->pages_pin_count++; |
| 1864 | } |
| 1865 | static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) |
| 1866 | { |
| 1867 | BUG_ON(obj->pages_pin_count == 0); |
| 1868 | obj->pages_pin_count--; |
| 1869 | } |
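
/*
 * Illustrative sketch, not part of the driver: holding a pin on an object's
 * backing pages while peeking at one of them.  Assumes the caller holds
 * struct_mutex, that the pages are already populated (a prior
 * i915_gem_object_get_pages() succeeded), and that <linux/highmem.h> is
 * available for kmap()/kunmap().  The helper name is hypothetical.
 */
static inline int example_peek_object_byte(struct drm_i915_gem_object *obj,
					   int page_index, u8 *out)
{
	struct page *page;
	u8 *vaddr;

	/* Keep the backing pages resident while we map one of them. */
	i915_gem_object_pin_pages(obj);

	page = i915_gem_object_get_page(obj, page_index);
	if (page == NULL) {
		i915_gem_object_unpin_pages(obj);
		return -EINVAL;
	}

	vaddr = kmap(page);
	*out = vaddr[0];
	kunmap(page);

	i915_gem_object_unpin_pages(obj);
	return 0;
}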
| 1870 | |
Chris Wilson | 54cf91d | 2010-11-25 18:00:26 +0000 | [diff] [blame] | 1871 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
Ben Widawsky | 2911a35 | 2012-04-05 14:47:36 -0700 | [diff] [blame] | 1872 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
| 1873 | struct intel_ring_buffer *to); |
Chris Wilson | 54cf91d | 2010-11-25 18:00:26 +0000 | [diff] [blame] | 1874 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
Chris Wilson | 9d773091 | 2012-11-27 16:22:52 +0000 | [diff] [blame] | 1875 | struct intel_ring_buffer *ring); |
Chris Wilson | 54cf91d | 2010-11-25 18:00:26 +0000 | [diff] [blame] | 1876 | |
Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 1877 | int i915_gem_dumb_create(struct drm_file *file_priv, |
| 1878 | struct drm_device *dev, |
| 1879 | struct drm_mode_create_dumb *args); |
| 1880 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, |
| 1881 | uint32_t handle, uint64_t *offset); |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1882 | /** |
| 1883 |  * Returns true if seq1 is later than or equal to seq2 (wrap-safe). |
| 1884 | */ |
| 1885 | static inline bool |
| 1886 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) |
| 1887 | { |
| 1888 | return (int32_t)(seq1 - seq2) >= 0; |
| 1889 | } |
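
/*
 * Illustrative sketch, not part of the driver: the signed subtraction in
 * i915_seqno_passed() makes the comparison robust against 32-bit seqno
 * wrap-around.  The helper name and the sample values are purely for
 * illustration.
 */
static inline bool example_seqno_wrap_is_handled(void)
{
	uint32_t before_wrap = 0xfffffffe;
	uint32_t after_wrap = 0x00000001;

	/* 0x00000001 is logically later than 0xfffffffe despite the wrap. */
	return i915_seqno_passed(after_wrap, before_wrap) &&
	       !i915_seqno_passed(before_wrap, after_wrap);
}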
| 1890 | |
Mika Kuoppala | fca26bb | 2012-12-19 11:13:08 +0200 | [diff] [blame] | 1891 | int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
| 1892 | int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); |
Chris Wilson | 06d9813 | 2012-04-17 15:31:24 +0100 | [diff] [blame] | 1893 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
Chris Wilson | d9e86c0 | 2010-11-10 16:40:20 +0000 | [diff] [blame] | 1894 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 1895 | |
Chris Wilson | 9a5a53b | 2012-03-22 15:10:00 +0000 | [diff] [blame] | 1896 | static inline bool |
Chris Wilson | 1690e1e | 2011-12-14 13:57:08 +0100 | [diff] [blame] | 1897 | i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) |
| 1898 | { |
| 1899 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
| 1900 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
| 1901 | dev_priv->fence_regs[obj->fence_reg].pin_count++; |
Chris Wilson | 9a5a53b | 2012-03-22 15:10:00 +0000 | [diff] [blame] | 1902 | return true; |
| 1903 | } else |
| 1904 | return false; |
Chris Wilson | 1690e1e | 2011-12-14 13:57:08 +0100 | [diff] [blame] | 1905 | } |
| 1906 | |
| 1907 | static inline void |
| 1908 | i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) |
| 1909 | { |
| 1910 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
| 1911 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
Chris Wilson | b8c3af7 | 2013-06-12 11:29:47 +0100 | [diff] [blame] | 1912 | WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); |
Chris Wilson | 1690e1e | 2011-12-14 13:57:08 +0100 | [diff] [blame] | 1913 | dev_priv->fence_regs[obj->fence_reg].pin_count--; |
| 1914 | } |
| 1915 | } |
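
/*
 * Illustrative sketch, not part of the driver: pairing pin_fence() and
 * unpin_fence() so a fence register cannot be stolen while a tiled access
 * is in flight.  Assumes a fence was previously assigned via
 * i915_gem_object_get_fence(); the helper name is hypothetical.
 */
static inline void example_touch_fenced_object(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_pin_fence(obj)) {
		/* ... access the object through its fenced GTT mapping ... */
		i915_gem_object_unpin_fence(obj);
	}
	/* Otherwise no fence register is assigned: nothing to pin or drop. */
}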
| 1916 | |
Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 1917 | void i915_gem_retire_requests(struct drm_device *dev); |
Chris Wilson | a71d8d9 | 2012-02-15 11:25:36 +0000 | [diff] [blame] | 1918 | void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); |
Daniel Vetter | 33196de | 2012-11-14 17:14:05 +0100 | [diff] [blame] | 1919 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
Daniel Vetter | d6b2c79 | 2012-07-04 22:54:13 +0200 | [diff] [blame] | 1920 | bool interruptible); |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1921 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
| 1922 | { |
| 1923 | return unlikely(atomic_read(&error->reset_counter) |
| 1924 | & I915_RESET_IN_PROGRESS_FLAG); |
| 1925 | } |
| 1926 | |
| 1927 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) |
| 1928 | { |
| 1929 | return atomic_read(&error->reset_counter) == I915_WEDGED; |
| 1930 | } |
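
/*
 * Illustrative sketch, not part of the driver: bailing out of GPU work when
 * a reset is pending or the GPU is terminally wedged.  The helper name and
 * the errno mapping are illustrative only; the real policy lives in
 * i915_gem_check_wedge().
 */
static inline int example_gpu_usable(struct i915_gpu_error *error)
{
	if (i915_terminally_wedged(error))
		return -EIO;	/* the GPU is not coming back */
	if (i915_reset_in_progress(error))
		return -EAGAIN;	/* a reset is pending, retry later */
	return 0;
}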
Chris Wilson | a71d8d9 | 2012-02-15 11:25:36 +0000 | [diff] [blame] | 1931 | |
Chris Wilson | 069efc1 | 2010-09-30 16:53:18 +0100 | [diff] [blame] | 1932 | void i915_gem_reset(struct drm_device *dev); |
Chris Wilson | 000433b | 2013-08-08 14:41:09 +0100 | [diff] [blame] | 1933 | bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); |
Chris Wilson | a8198ee | 2011-04-13 22:04:09 +0100 | [diff] [blame] | 1934 | int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1935 | int __must_check i915_gem_init(struct drm_device *dev); |
Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 1936 | int __must_check i915_gem_init_hw(struct drm_device *dev); |
Ben Widawsky | b9524a1 | 2012-05-25 16:56:24 -0700 | [diff] [blame] | 1937 | void i915_gem_l3_remap(struct drm_device *dev); |
Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 1938 | void i915_gem_init_swizzling(struct drm_device *dev); |
Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 1939 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
Ben Widawsky | b2da9fe | 2012-04-26 16:02:58 -0700 | [diff] [blame] | 1940 | int __must_check i915_gpu_idle(struct drm_device *dev); |
Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 1941 | int __must_check i915_gem_idle(struct drm_device *dev); |
Mika Kuoppala | 0025c07 | 2013-06-12 12:35:30 +0300 | [diff] [blame] | 1942 | int __i915_add_request(struct intel_ring_buffer *ring, |
| 1943 | struct drm_file *file, |
Mika Kuoppala | 7d736f4 | 2013-06-12 15:01:39 +0300 | [diff] [blame] | 1944 | struct drm_i915_gem_object *batch_obj, |
Mika Kuoppala | 0025c07 | 2013-06-12 12:35:30 +0300 | [diff] [blame] | 1945 | u32 *seqno); |
| 1946 | #define i915_add_request(ring, seqno) \ |
Dan Carpenter | 854c94a | 2013-06-18 10:29:58 +0300 | [diff] [blame] | 1947 | __i915_add_request(ring, NULL, NULL, seqno) |
Ben Widawsky | 199b2bc | 2012-05-24 15:03:11 -0700 | [diff] [blame] | 1948 | int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, |
| 1949 | uint32_t seqno); |
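
/*
 * Illustrative sketch, not part of the driver: emitting a request on a ring
 * and blocking until it has been retired.  Assumes the caller holds
 * struct_mutex; the helper name is hypothetical.
 */
static inline int example_emit_and_wait(struct intel_ring_buffer *ring)
{
	u32 seqno;
	int ret;

	/* No drm_file or batch object to associate with this request. */
	ret = i915_add_request(ring, &seqno);
	if (ret)
		return ret;

	/* Sleep until the GPU has passed our seqno. */
	return i915_wait_seqno(ring, seqno);
}
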
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1950 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 1951 | int __must_check |
| 1952 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
| 1953 | bool write); |
| 1954 | int __must_check |
Chris Wilson | dabdfe0 | 2012-03-26 10:10:27 +0200 | [diff] [blame] | 1955 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); |
| 1956 | int __must_check |
Chris Wilson | 2da3b9b | 2011-04-14 09:41:17 +0100 | [diff] [blame] | 1957 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
| 1958 | u32 alignment, |
Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 1959 | struct intel_ring_buffer *pipelined); |
Chris Wilson | cc98b41 | 2013-08-09 12:25:09 +0100 | [diff] [blame] | 1960 | void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); |
Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1961 | int i915_gem_attach_phys_object(struct drm_device *dev, |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1962 | struct drm_i915_gem_object *obj, |
Chris Wilson | 6eeefaf | 2010-08-07 11:01:39 +0100 | [diff] [blame] | 1963 | int id, |
| 1964 | int align); |
Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1965 | void i915_gem_detach_phys_object(struct drm_device *dev, |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1966 | struct drm_i915_gem_object *obj); |
Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1967 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1968 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1969 | |
Chris Wilson | 467cffb | 2011-03-07 10:42:03 +0000 | [diff] [blame] | 1970 | uint32_t |
Imre Deak | 0fa8779 | 2013-01-07 21:47:35 +0200 | [diff] [blame] | 1971 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); |
| 1972 | uint32_t |
Imre Deak | d865110c | 2013-01-07 21:47:33 +0200 | [diff] [blame] | 1973 | i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, |
| 1974 | int tiling_mode, bool fenced); |
Chris Wilson | 467cffb | 2011-03-07 10:42:03 +0000 | [diff] [blame] | 1975 | |
Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 1976 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
| 1977 | enum i915_cache_level cache_level); |
| 1978 | |
Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 1979 | struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, |
| 1980 | struct dma_buf *dma_buf); |
| 1981 | |
| 1982 | struct dma_buf *i915_gem_prime_export(struct drm_device *dev, |
| 1983 | struct drm_gem_object *gem_obj, int flags); |
| 1984 | |
Chris Wilson | 19b2dbd | 2013-06-12 10:15:12 +0100 | [diff] [blame] | 1985 | void i915_gem_restore_fences(struct drm_device *dev); |
| 1986 | |
Ben Widawsky | a70a314 | 2013-07-31 16:59:56 -0700 | [diff] [blame] | 1987 | unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, |
| 1988 | struct i915_address_space *vm); |
| 1989 | bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); |
| 1990 | bool i915_gem_obj_bound(struct drm_i915_gem_object *o, |
| 1991 | struct i915_address_space *vm); |
| 1992 | unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, |
| 1993 | struct i915_address_space *vm); |
| 1994 | struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, |
| 1995 | struct i915_address_space *vm); |
Ben Widawsky | accfef2 | 2013-08-14 11:38:35 +0200 | [diff] [blame] | 1996 | struct i915_vma * |
| 1997 | i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, |
| 1998 | struct i915_address_space *vm); |
Ben Widawsky | a70a314 | 2013-07-31 16:59:56 -0700 | [diff] [blame] | 1999 | /* Some GGTT VM helpers */ |
| 2000 | #define obj_to_ggtt(obj) \ |
| 2001 | (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) |
| 2002 | static inline bool i915_is_ggtt(struct i915_address_space *vm) |
| 2003 | { |
| 2004 | struct i915_address_space *ggtt = |
| 2005 | &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; |
| 2006 | return vm == ggtt; |
| 2007 | } |
| 2008 | |
| 2009 | static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) |
| 2010 | { |
| 2011 | return i915_gem_obj_bound(obj, obj_to_ggtt(obj)); |
| 2012 | } |
| 2013 | |
| 2014 | static inline unsigned long |
| 2015 | i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) |
| 2016 | { |
| 2017 | return i915_gem_obj_offset(obj, obj_to_ggtt(obj)); |
| 2018 | } |
| 2019 | |
| 2020 | static inline unsigned long |
| 2021 | i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) |
| 2022 | { |
| 2023 | return i915_gem_obj_size(obj, obj_to_ggtt(obj)); |
| 2024 | } |
Ben Widawsky | c37e220 | 2013-07-31 16:59:58 -0700 | [diff] [blame] | 2025 | |
| 2026 | static inline int __must_check |
| 2027 | i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, |
| 2028 | uint32_t alignment, |
| 2029 | bool map_and_fenceable, |
| 2030 | bool nonblocking) |
| 2031 | { |
| 2032 | return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, |
| 2033 | map_and_fenceable, nonblocking); |
| 2034 | } |
Ben Widawsky | a70a314 | 2013-07-31 16:59:56 -0700 | [diff] [blame] | 2035 | #undef obj_to_ggtt |
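
/*
 * Illustrative sketch, not part of the driver: pinning an object into the
 * global GTT, reading back its GTT offset, and unpinning again.  Assumes the
 * caller holds struct_mutex; the helper name and the 4096-byte alignment are
 * arbitrary choices for illustration.
 */
static inline int example_pin_and_query_ggtt(struct drm_i915_gem_object *obj,
					     unsigned long *offset)
{
	int ret;

	/* page alignment, mappable through the aperture, may block */
	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
	if (ret)
		return ret;

	*offset = i915_gem_obj_ggtt_offset(obj);

	i915_gem_object_unpin(obj);
	return 0;
}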
| 2036 | |
Ben Widawsky | 254f965 | 2012-06-04 14:42:42 -0700 | [diff] [blame] | 2037 | /* i915_gem_context.c */ |
| 2038 | void i915_gem_context_init(struct drm_device *dev); |
| 2039 | void i915_gem_context_fini(struct drm_device *dev); |
Ben Widawsky | 254f965 | 2012-06-04 14:42:42 -0700 | [diff] [blame] | 2040 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 2041 | int i915_switch_context(struct intel_ring_buffer *ring, |
| 2042 | struct drm_file *file, int to_id); |
Mika Kuoppala | dce3271 | 2013-04-30 13:30:33 +0300 | [diff] [blame] | 2043 | void i915_gem_context_free(struct kref *ctx_ref); |
| 2044 | static inline void i915_gem_context_reference(struct i915_hw_context *ctx) |
| 2045 | { |
| 2046 | kref_get(&ctx->ref); |
| 2047 | } |
| 2048 | |
| 2049 | static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) |
| 2050 | { |
| 2051 | kref_put(&ctx->ref, i915_gem_context_free); |
| 2052 | } |
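
/*
 * Illustrative sketch, not part of the driver: holding a reference so a
 * hardware context cannot be freed while it is being worked on.  The helper
 * name is hypothetical.
 */
static inline void example_hold_context(struct i915_hw_context *ctx)
{
	i915_gem_context_reference(ctx);

	/* ... work on the context, possibly dropping locks ... */

	/* Drops the reference; frees the context if it was the last one. */
	i915_gem_context_unreference(ctx);
}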
| 2053 | |
Mika Kuoppala | c0bb617 | 2013-06-12 12:35:29 +0300 | [diff] [blame] | 2054 | struct i915_ctx_hang_stats * __must_check |
Chris Wilson | 11fa338 | 2013-07-03 17:22:06 +0300 | [diff] [blame] | 2055 | i915_gem_context_get_hang_stats(struct drm_device *dev, |
Mika Kuoppala | c0bb617 | 2013-06-12 12:35:29 +0300 | [diff] [blame] | 2056 | struct drm_file *file, |
| 2057 | u32 id); |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 2058 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
| 2059 | struct drm_file *file); |
| 2060 | int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, |
| 2061 | struct drm_file *file); |
Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 2062 | |
Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 2063 | /* i915_gem_gtt.c */ |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 2064 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); |
Daniel Vetter | 7bddb01 | 2012-02-09 17:15:47 +0100 | [diff] [blame] | 2065 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
| 2066 | struct drm_i915_gem_object *obj, |
| 2067 | enum i915_cache_level cache_level); |
| 2068 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
| 2069 | struct drm_i915_gem_object *obj); |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 2070 | |
Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 2071 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
Daniel Vetter | 7416390 | 2012-02-15 23:50:21 +0100 | [diff] [blame] | 2072 | int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); |
| 2073 | void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 2074 | enum i915_cache_level cache_level); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 2075 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
Daniel Vetter | 7416390 | 2012-02-15 23:50:21 +0100 | [diff] [blame] | 2076 | void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); |
Ben Widawsky | d7e5008 | 2012-12-18 10:31:25 -0800 | [diff] [blame] | 2077 | void i915_gem_init_global_gtt(struct drm_device *dev); |
| 2078 | void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, |
| 2079 | unsigned long mappable_end, unsigned long end); |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2080 | int i915_gem_gtt_init(struct drm_device *dev); |
Ben Widawsky | d09105c | 2012-11-15 12:06:09 -0800 | [diff] [blame] | 2081 | static inline void i915_gem_chipset_flush(struct drm_device *dev) |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2082 | { |
| 2083 | if (INTEL_INFO(dev)->gen < 6) |
| 2084 | intel_gtt_chipset_flush(); |
| 2085 | } |
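
/*
 * Illustrative sketch, not part of the driver: after the CPU has written data
 * that the GPU will read through the GTT, pre-gen6 parts need the chipset
 * write buffers flushed; on gen6+ i915_gem_chipset_flush() is a no-op.  The
 * helper name is hypothetical and the wmb() is included for illustration.
 */
static inline void example_finish_cpu_writes(struct drm_device *dev)
{
	wmb();				/* order the CPU writes themselves */
	i915_gem_chipset_flush(dev);	/* then flush the chipset if needed */
}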
| 2086 | |
Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 2087 | |
Chris Wilson | b47eb4a | 2010-08-07 11:01:23 +0100 | [diff] [blame] | 2088 | /* i915_gem_evict.c */ |
Ben Widawsky | f6cd1f1 | 2013-07-31 17:00:11 -0700 | [diff] [blame] | 2089 | int __must_check i915_gem_evict_something(struct drm_device *dev, |
| 2090 | struct i915_address_space *vm, |
| 2091 | int min_size, |
Chris Wilson | 42d6ab4 | 2012-07-26 11:49:32 +0100 | [diff] [blame] | 2092 | unsigned alignment, |
| 2093 | unsigned cache_level, |
Chris Wilson | 86a1ee2 | 2012-08-11 15:41:04 +0100 | [diff] [blame] | 2094 | bool mappable, |
| 2095 | bool nonblock); |
Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2096 | int i915_gem_evict_everything(struct drm_device *dev); |
Chris Wilson | b47eb4a | 2010-08-07 11:01:23 +0100 | [diff] [blame] | 2097 | |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 2098 | /* i915_gem_stolen.c */ |
| 2099 | int i915_gem_init_stolen(struct drm_device *dev); |
Chris Wilson | 11be49e | 2012-11-15 11:32:20 +0000 | [diff] [blame] | 2100 | int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); |
| 2101 | void i915_gem_stolen_cleanup_compression(struct drm_device *dev); |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 2102 | void i915_gem_cleanup_stolen(struct drm_device *dev); |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 2103 | struct drm_i915_gem_object * |
| 2104 | i915_gem_object_create_stolen(struct drm_device *dev, u32 size); |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 2105 | struct drm_i915_gem_object * |
| 2106 | i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, |
| 2107 | u32 stolen_offset, |
| 2108 | u32 gtt_offset, |
| 2109 | u32 size); |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 2110 | void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 2111 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2112 | /* i915_gem_tiling.c */ |
Chris Wilson | 2c1792a | 2013-08-01 18:39:55 +0100 | [diff] [blame] | 2113 | static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
Chris Wilson | e9b73c6 | 2012-12-03 21:03:14 +0000 | [diff] [blame] | 2114 | { |
| 2115 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
| 2116 | |
| 2117 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
| 2118 | obj->tiling_mode != I915_TILING_NONE; |
| 2119 | } |
| 2120 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2121 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 2122 | void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
| 2123 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2124 | |
| 2125 | /* i915_gem_debug.c */ |
Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 2126 | #if WATCH_LISTS |
| 2127 | int i915_verify_lists(struct drm_device *dev); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2128 | #else |
Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 2129 | #define i915_verify_lists(dev) 0 |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2130 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2131 | |
Ben Gamari | 2017263 | 2009-02-17 20:08:50 -0500 | [diff] [blame] | 2132 | /* i915_debugfs.c */ |
Ben Gamari | 27c202a | 2009-07-01 22:26:52 -0400 | [diff] [blame] | 2133 | int i915_debugfs_init(struct drm_minor *minor); |
| 2134 | void i915_debugfs_cleanup(struct drm_minor *minor); |
Mika Kuoppala | 84734a0 | 2013-07-12 16:50:57 +0300 | [diff] [blame] | 2135 | |
| 2136 | /* i915_gpu_error.c */ |
Mika Kuoppala | edc3d88 | 2013-05-23 13:55:35 +0300 | [diff] [blame] | 2137 | __printf(2, 3) |
| 2138 | void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); |
Mika Kuoppala | fc16b48 | 2013-06-06 15:18:39 +0300 | [diff] [blame] | 2139 | int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, |
| 2140 | const struct i915_error_state_file_priv *error); |
Mika Kuoppala | 4dc955f | 2013-06-06 15:18:41 +0300 | [diff] [blame] | 2141 | int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, |
| 2142 | size_t count, loff_t pos); |
| 2143 | static inline void i915_error_state_buf_release( |
| 2144 | struct drm_i915_error_state_buf *eb) |
| 2145 | { |
| 2146 | kfree(eb->buf); |
| 2147 | } |
Mika Kuoppala | 84734a0 | 2013-07-12 16:50:57 +0300 | [diff] [blame] | 2148 | void i915_capture_error_state(struct drm_device *dev); |
| 2149 | void i915_error_state_get(struct drm_device *dev, |
| 2150 | struct i915_error_state_file_priv *error_priv); |
| 2151 | void i915_error_state_put(struct i915_error_state_file_priv *error_priv); |
| 2152 | void i915_destroy_error_state(struct drm_device *dev); |
| 2153 | |
| 2154 | void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); |
| 2155 | const char *i915_cache_level_str(int type); |
Ben Gamari | 2017263 | 2009-02-17 20:08:50 -0500 | [diff] [blame] | 2156 | |
Jesse Barnes | 317c35d | 2008-08-25 15:11:06 -0700 | [diff] [blame] | 2157 | /* i915_suspend.c */ |
| 2158 | extern int i915_save_state(struct drm_device *dev); |
| 2159 | extern int i915_restore_state(struct drm_device *dev); |
| 2160 | |
Daniel Vetter | d8157a3 | 2013-01-25 17:53:20 +0100 | [diff] [blame] | 2161 | /* i915_ums.c */ |
| 2162 | void i915_save_display_reg(struct drm_device *dev); |
| 2163 | void i915_restore_display_reg(struct drm_device *dev); |
Jesse Barnes | 0a3e67a | 2008-09-30 12:14:26 -0700 | [diff] [blame] | 2164 | |
Ben Widawsky | 0136db5 | 2012-04-10 21:17:01 -0700 | [diff] [blame] | 2165 | /* i915_sysfs.c */ |
| 2166 | void i915_setup_sysfs(struct drm_device *dev_priv); |
| 2167 | void i915_teardown_sysfs(struct drm_device *dev_priv); |
| 2168 | |
Chris Wilson | f899fc6 | 2010-07-20 15:44:45 -0700 | [diff] [blame] | 2169 | /* intel_i2c.c */ |
| 2170 | extern int intel_setup_gmbus(struct drm_device *dev); |
| 2171 | extern void intel_teardown_gmbus(struct drm_device *dev); |
Jan-Simon Möller | 8f375e1 | 2013-05-06 14:52:08 +0200 | [diff] [blame] | 2172 | static inline bool intel_gmbus_is_port_valid(unsigned port) |
Daniel Kurtz | 3bd7d90 | 2012-03-28 02:36:14 +0800 | [diff] [blame] | 2173 | { |
Daniel Kurtz | 2ed06c9 | 2012-03-28 02:36:15 +0800 | [diff] [blame] | 2174 | return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); |
Daniel Kurtz | 3bd7d90 | 2012-03-28 02:36:14 +0800 | [diff] [blame] | 2175 | } |
| 2176 | |
| 2177 | extern struct i2c_adapter *intel_gmbus_get_adapter( |
| 2178 | struct drm_i915_private *dev_priv, unsigned port); |
Chris Wilson | e957d77 | 2010-09-24 12:52:03 +0100 | [diff] [blame] | 2179 | extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); |
| 2180 | extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); |
Jan-Simon Möller | 8f375e1 | 2013-05-06 14:52:08 +0200 | [diff] [blame] | 2181 | static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) |
Chris Wilson | b8232e9 | 2010-09-28 16:41:32 +0100 | [diff] [blame] | 2182 | { |
| 2183 | return container_of(adapter, struct intel_gmbus, adapter)->force_bit; |
| 2184 | } |
Chris Wilson | f899fc6 | 2010-07-20 15:44:45 -0700 | [diff] [blame] | 2185 | extern void intel_i2c_reset(struct drm_device *dev); |
| 2186 | |
Chris Wilson | 3b61796 | 2010-08-24 09:02:58 +0100 | [diff] [blame] | 2187 | /* intel_opregion.c */ |
Chris Wilson | 44834a6 | 2010-08-19 16:09:23 +0100 | [diff] [blame] | 2188 | extern int intel_opregion_setup(struct drm_device *dev); |
| 2189 | #ifdef CONFIG_ACPI |
| 2190 | extern void intel_opregion_init(struct drm_device *dev); |
| 2191 | extern void intel_opregion_fini(struct drm_device *dev); |
Chris Wilson | 3b61796 | 2010-08-24 09:02:58 +0100 | [diff] [blame] | 2192 | extern void intel_opregion_asle_intr(struct drm_device *dev); |
Len Brown | 65e082c | 2008-10-24 17:18:10 -0400 | [diff] [blame] | 2193 | #else |
Chris Wilson | 44834a6 | 2010-08-19 16:09:23 +0100 | [diff] [blame] | 2194 | static inline void intel_opregion_init(struct drm_device *dev) { return; } |
| 2195 | static inline void intel_opregion_fini(struct drm_device *dev) { return; } |
Chris Wilson | 3b61796 | 2010-08-24 09:02:58 +0100 | [diff] [blame] | 2196 | static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } |
Len Brown | 65e082c | 2008-10-24 17:18:10 -0400 | [diff] [blame] | 2197 | #endif |
Matthew Garrett | 8ee1c3d | 2008-08-05 19:37:25 +0100 | [diff] [blame] | 2198 | |
Jesse Barnes | 723bfd7 | 2010-10-07 16:01:13 -0700 | [diff] [blame] | 2199 | /* intel_acpi.c */ |
| 2200 | #ifdef CONFIG_ACPI |
| 2201 | extern void intel_register_dsm_handler(void); |
| 2202 | extern void intel_unregister_dsm_handler(void); |
| 2203 | #else |
| 2204 | static inline void intel_register_dsm_handler(void) { return; } |
| 2205 | static inline void intel_unregister_dsm_handler(void) { return; } |
| 2206 | #endif /* CONFIG_ACPI */ |
| 2207 | |
Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2208 | /* modesetting */ |
Daniel Vetter | f817586 | 2012-04-10 15:50:11 +0200 | [diff] [blame] | 2209 | extern void intel_modeset_init_hw(struct drm_device *dev); |
Imre Deak | 7d708ee | 2013-04-17 14:04:50 +0300 | [diff] [blame] | 2210 | extern void intel_modeset_suspend_hw(struct drm_device *dev); |
Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2211 | extern void intel_modeset_init(struct drm_device *dev); |
Chris Wilson | 2c7111d | 2011-03-29 10:40:27 +0100 | [diff] [blame] | 2212 | extern void intel_modeset_gem_init(struct drm_device *dev); |
Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2213 | extern void intel_modeset_cleanup(struct drm_device *dev); |
Dave Airlie | 28d5204 | 2009-09-21 14:33:58 +1000 | [diff] [blame] | 2214 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
Daniel Vetter | 45e2b5f | 2012-11-23 18:16:34 +0100 | [diff] [blame] | 2215 | extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
| 2216 | bool force_restore); |
Daniel Vetter | 44cec74 | 2013-01-25 17:53:21 +0100 | [diff] [blame] | 2217 | extern void i915_redisable_vga(struct drm_device *dev); |
Adam Jackson | ee5382a | 2010-04-23 11:17:39 -0400 | [diff] [blame] | 2218 | extern bool intel_fbc_enabled(struct drm_device *dev); |
Chris Wilson | 43a9539 | 2011-07-08 12:22:36 +0100 | [diff] [blame] | 2219 | extern void intel_disable_fbc(struct drm_device *dev); |
Jesse Barnes | 7648fa9 | 2010-05-20 14:28:11 -0700 | [diff] [blame] | 2220 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
Paulo Zanoni | dde86e2 | 2012-12-01 12:04:25 -0200 | [diff] [blame] | 2221 | extern void intel_init_pch_refclk(struct drm_device *dev); |
Jesse Barnes | 3b8d8d9 | 2010-12-17 14:19:02 -0800 | [diff] [blame] | 2222 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
Jesse Barnes | 0a073b8 | 2013-04-17 15:54:58 -0700 | [diff] [blame] | 2223 | extern void valleyview_set_rps(struct drm_device *dev, u8 val); |
| 2224 | extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv); |
| 2225 | extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv); |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 2226 | extern void intel_detect_pch(struct drm_device *dev); |
| 2227 | extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); |
Ben Widawsky | 0136db5 | 2012-04-10 21:17:01 -0700 | [diff] [blame] | 2228 | extern int intel_enable_rc6(const struct drm_device *dev); |
Zhenyu Wang | 3bad078 | 2010-04-07 16:15:53 +0800 | [diff] [blame] | 2229 | |
Ben Widawsky | 2911a35 | 2012-04-05 14:47:36 -0700 | [diff] [blame] | 2230 | extern bool i915_semaphore_is_enabled(struct drm_device *dev); |
Ben Widawsky | c0c7bab | 2012-07-12 11:01:05 -0700 | [diff] [blame] | 2231 | int i915_reg_read_ioctl(struct drm_device *dev, void *data, |
| 2232 | struct drm_file *file); |
Jesse Barnes | 575155a | 2012-03-28 13:39:37 -0700 | [diff] [blame] | 2233 | |
Chris Wilson | 6ef3d42 | 2010-08-04 20:26:07 +0100 | [diff] [blame] | 2234 | /* overlay */ |
| 2235 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
Mika Kuoppala | edc3d88 | 2013-05-23 13:55:35 +0300 | [diff] [blame] | 2236 | extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, |
| 2237 | struct intel_overlay_error_state *error); |
Chris Wilson | c4a1d9e | 2010-11-21 13:12:35 +0000 | [diff] [blame] | 2238 | |
| 2239 | extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); |
Mika Kuoppala | edc3d88 | 2013-05-23 13:55:35 +0300 | [diff] [blame] | 2240 | extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, |
Chris Wilson | c4a1d9e | 2010-11-21 13:12:35 +0000 | [diff] [blame] | 2241 | struct drm_device *dev, |
| 2242 | struct intel_display_error_state *error); |
Chris Wilson | 6ef3d42 | 2010-08-04 20:26:07 +0100 | [diff] [blame] | 2243 | |
Ben Widawsky | b7287d8 | 2011-04-25 11:22:22 -0700 | [diff] [blame] | 2244 | /* On the SNB platform, the forcewake bit must be set before reading |
| 2245 |  * ring registers, to prevent the GT core from powering down and |
| 2246 |  * returning stale values. |
| 2247 |  */ |
Ben Widawsky | fcca792 | 2011-04-25 11:23:07 -0700 | [diff] [blame] | 2248 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
| 2249 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
Ben Widawsky | b7287d8 | 2011-04-25 11:22:22 -0700 | [diff] [blame] | 2250 | |
Ben Widawsky | 42c0526 | 2012-09-26 10:34:00 -0700 | [diff] [blame] | 2251 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); |
| 2252 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); |
Jani Nikula | 59de081 | 2013-05-22 15:36:16 +0300 | [diff] [blame] | 2253 | |
| 2254 | /* intel_sideband.c */ |
Jani Nikula | 6493625 | 2013-05-22 15:36:20 +0300 | [diff] [blame] | 2255 | u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); |
| 2256 | void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); |
| 2257 | u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); |
Jani Nikula | e9f882a | 2013-08-27 15:12:14 +0300 | [diff] [blame] | 2258 | u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); |
| 2259 | void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
| 2260 | u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); |
| 2261 | void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
| 2262 | u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); |
| 2263 | void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
| 2264 | u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); |
| 2265 | void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
Jani Nikula | ae99258 | 2013-05-22 15:36:19 +0300 | [diff] [blame] | 2266 | u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg); |
| 2267 | void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val); |
Jani Nikula | 59de081 | 2013-05-22 15:36:16 +0300 | [diff] [blame] | 2268 | u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, |
| 2269 | enum intel_sbi_destination destination); |
| 2270 | void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, |
| 2271 | enum intel_sbi_destination destination); |
Jesse Barnes | 0a073b8 | 2013-04-17 15:54:58 -0700 | [diff] [blame] | 2272 | |
Jesse Barnes | 855ba3b | 2013-04-17 15:54:57 -0700 | [diff] [blame] | 2273 | int vlv_gpu_freq(int ddr_freq, int val); |
| 2274 | int vlv_freq_opcode(int ddr_freq, int val); |
Ben Widawsky | 42c0526 | 2012-09-26 10:34:00 -0700 | [diff] [blame] | 2275 | |
Chris Wilson | 6af5d92 | 2013-07-19 20:36:53 +0100 | [diff] [blame] | 2276 | #define __i915_read(x) \ |
Chris Wilson | dba8e41 | 2013-07-19 20:36:54 +0100 | [diff] [blame] | 2277 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace); |
Chris Wilson | 6af5d92 | 2013-07-19 20:36:53 +0100 | [diff] [blame] | 2278 | __i915_read(8) |
| 2279 | __i915_read(16) |
| 2280 | __i915_read(32) |
| 2281 | __i915_read(64) |
Keith Packard | 5f75377 | 2010-11-22 09:24:22 +0000 | [diff] [blame] | 2282 | #undef __i915_read |
| 2283 | |
Chris Wilson | 6af5d92 | 2013-07-19 20:36:53 +0100 | [diff] [blame] | 2284 | #define __i915_write(x) \ |
Chris Wilson | dba8e41 | 2013-07-19 20:36:54 +0100 | [diff] [blame] | 2285 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace); |
Chris Wilson | 6af5d92 | 2013-07-19 20:36:53 +0100 | [diff] [blame] | 2286 | __i915_write(8) |
| 2287 | __i915_write(16) |
| 2288 | __i915_write(32) |
| 2289 | __i915_write(64) |
Keith Packard | 5f75377 | 2010-11-22 09:24:22 +0000 | [diff] [blame] | 2290 | #undef __i915_write |
| 2291 | |
Chris Wilson | dba8e41 | 2013-07-19 20:36:54 +0100 | [diff] [blame] | 2292 | #define I915_READ8(reg) i915_read8(dev_priv, (reg), true) |
| 2293 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true) |
Keith Packard | 5f75377 | 2010-11-22 09:24:22 +0000 | [diff] [blame] | 2294 | |
Chris Wilson | dba8e41 | 2013-07-19 20:36:54 +0100 | [diff] [blame] | 2295 | #define I915_READ16(reg) i915_read16(dev_priv, (reg), true) |
| 2296 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true) |
| 2297 | #define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false) |
| 2298 | #define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false) |
Keith Packard | 5f75377 | 2010-11-22 09:24:22 +0000 | [diff] [blame] | 2299 | |
Chris Wilson | dba8e41 | 2013-07-19 20:36:54 +0100 | [diff] [blame] | 2300 | #define I915_READ(reg) i915_read32(dev_priv, (reg), true) |
| 2301 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true) |
| 2302 | #define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false) |
| 2303 | #define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false) |
Keith Packard | 5f75377 | 2010-11-22 09:24:22 +0000 | [diff] [blame] | 2304 | |
Chris Wilson | dba8e41 | 2013-07-19 20:36:54 +0100 | [diff] [blame] | 2305 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true) |
| 2306 | #define I915_READ64(reg) i915_read64(dev_priv, (reg), true) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2307 | |
| 2308 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
| 2309 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
| 2310 | |
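/*
 * Illustrative sketch, not part of the driver: bracketing a GT register read
 * with a forcewake reference, per the SNB note above the
 * gen6_gt_force_wake_get()/put() declarations.  The I915_READ() macro expects
 * a local named dev_priv in scope; the helper name is hypothetical.
 */
static inline u32 example_read_gt_reg(struct drm_i915_private *dev_priv,
				      u32 reg)
{
	u32 val;

	gen6_gt_force_wake_get(dev_priv);	/* keep the GT core awake */
	val = I915_READ(reg);			/* read while forcewake is held */
	gen6_gt_force_wake_put(dev_priv);	/* allow power-down again */

	return val;
}
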
Ville Syrjälä | 55bc60d | 2013-01-17 16:31:29 +0200 | [diff] [blame] | 2311 | /* "Broadcast RGB" property */ |
| 2312 | #define INTEL_BROADCAST_RGB_AUTO 0 |
| 2313 | #define INTEL_BROADCAST_RGB_FULL 1 |
| 2314 | #define INTEL_BROADCAST_RGB_LIMITED 2 |
Yuanhan Liu | ba4f01a | 2010-11-08 17:09:41 +0800 | [diff] [blame] | 2315 | |
Ville Syrjälä | 766aa1c | 2013-01-25 21:44:46 +0200 | [diff] [blame] | 2316 | static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) |
| 2317 | { |
| 2318 | if (HAS_PCH_SPLIT(dev)) |
| 2319 | return CPU_VGACNTRL; |
| 2320 | else if (IS_VALLEYVIEW(dev)) |
| 2321 | return VLV_VGACNTRL; |
| 2322 | else |
| 2323 | return VGACNTRL; |
| 2324 | } |
| 2325 | |
Ville Syrjälä | 2bb4629 | 2013-02-22 16:12:51 +0200 | [diff] [blame] | 2326 | static inline void __user *to_user_ptr(u64 address) |
| 2327 | { |
| 2328 | return (void __user *)(uintptr_t)address; |
| 2329 | } |
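
/*
 * Illustrative sketch, not part of the driver: ioctl structures carry user
 * pointers as u64 so the layout is identical for 32-bit and 64-bit
 * userspace; to_user_ptr() converts them back before use.  Assumes
 * <linux/uaccess.h> for copy_to_user(); the helper name is hypothetical.
 */
static inline int example_copy_to_user_u64(u64 user_addr, const void *src,
					   size_t len)
{
	void __user *dst = to_user_ptr(user_addr);

	if (copy_to_user(dst, src, len))
		return -EFAULT;

	return 0;
}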
| 2330 | |
Imre Deak | df97729 | 2013-05-21 20:03:17 +0300 | [diff] [blame] | 2331 | static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) |
| 2332 | { |
| 2333 | unsigned long j = msecs_to_jiffies(m); |
| 2334 | |
| 2335 | return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); |
| 2336 | } |
| 2337 | |
| 2338 | static inline unsigned long |
| 2339 | timespec_to_jiffies_timeout(const struct timespec *value) |
| 2340 | { |
| 2341 | unsigned long j = timespec_to_jiffies(value); |
| 2342 | |
| 2343 | return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); |
| 2344 | } |
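
/*
 * Illustrative sketch, not part of the driver: the "+ 1" in the helpers above
 * rounds the converted timeout up by one jiffy, so a caller asking to wait at
 * least N ms never sleeps for less than N ms because of jiffy granularity.
 * Assumes <linux/wait.h>; the helper name is hypothetical.
 */
static inline long example_wait_at_least_ms(wait_queue_head_t *wq, bool *done,
					    unsigned int ms)
{
	/* The rounded-up conversion guarantees a minimum wait of 'ms'. */
	return wait_event_timeout(*wq, *done, msecs_to_jiffies_timeout(ms));
}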
| 2345 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2346 | #endif |