/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR "Tungsten Graphics, Inc."

#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')
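/*
 * pipe_name() and the analogous *_name() helpers below map an enum value
 * to the letter used for it in debug output, e.g. pipe_name(PIPE_B)
 * evaluates to 'B'. They rely on the enum values starting at 0 and being
 * consecutive.
 */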

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
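/*
 * sprite_name() letters sprites consecutively across pipes: it assumes each
 * pipe has dev_priv->num_plane sprite planes, so pipe B's first sprite
 * follows pipe A's last one. Note it implicitly uses a dev_priv variable
 * from the surrounding scope.
 */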

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
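/*
 * These lookup macros lean on the two enums above sharing an order, e.g.
 * POWER_DOMAIN_PIPE(PIPE_B) yields POWER_DOMAIN_PIPE_B, and
 * POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) yields
 * POWER_DOMAIN_TRANSCODER_EDP (both enums use the same 0xF offset for eDP).
 */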

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE, /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))
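/*
 * Minimal usage sketches for the two iterators above (the loop bodies are
 * hypothetical). for_each_pipe() implicitly needs a 'dev' in scope, since
 * it expands to INTEL_INFO(dev); 'crtc' below is assumed to exist:
 *
 *	int pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder active on this crtc\n");
 */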

struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0

#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock's P divider must
	 *               match the P divider from @match_clock
	 *               (used for LVDS downclocking)
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(has_force_wake) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_bsd_ring) sep \
	func(has_blt_ring) sep \
	func(has_vebox_ring) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
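/*
 * For reference, the x-macro above expands inside intel_device_info to a
 * run of one-bit fields:
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 *
 * so adding a feature flag only requires touching DEV_INFO_FOR_EACH_FLAG;
 * other users can pass their own func/sep pair, e.g. to generate debug
 * output for every flag.
 */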

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
};

typedef uint32_t gen6_gtt_pte_t;

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start; /* Start offset always 0 for dri2 */
	size_t total; /* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size; /* Total size of stolen memory */

	unsigned long mappable_end; /* End offset that we can CPU map */
	struct io_mapping *mappable; /* Mapping to our CPU mappable region */
	phys_addr_t mappable_base; /* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
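/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), a 2 GiB global GTT
 * gives gtt_total_entries(dev_priv->gtt) == 2 GiB / 4 KiB == 524288 PTEs.
 */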

struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;

	int (*enable)(struct drm_device *dev);
};

/* To make things as simple as possible (i.e. no refcounting), a VMA's
 * lifetime will always be <= the lifetime of the object it maps, so object
 * refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	struct list_head vma_link; /* Link in the object's VMA list */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
		int interval;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

enum no_psr_reason {
	PSR_NO_SOURCE, /* Not supported on platform */
	PSR_NO_SINK, /* Not supported by panel */
	PSR_MODULE_PARAM,
	PSR_CRTC_NOT_ACTIVE,
	PSR_PWR_WELL_ENABLED,
	PSR_NOT_TILED,
	PSR_SPRITE_ENABLED,
	PSR_S3D_ENABLED,
	PSR_INTERLACED_ENABLED,
	PSR_HSW_NOT_DDIA,
};

enum intel_pch {
	PCH_NONE = 0, /* No PCH present */
	PCH_IBX, /* Ibexpeak PCH */
	PCH_CPT, /* Cougarpoint PCH */
	PCH_LPT, /* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* On vlv we need to manually drop to Vmin with a delayed work. */
	struct delayed_work vlv_work;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 hw_max;

	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	struct drm_device *device;
	spinlock_t lock;
	/* power well enable/disable usage count */
	int count;
	int i915_request;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

struct intel_l3_parity {
	u32 *remap_info;
	struct work_struct error_work;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long last_reset;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, race-free, that a reset event happened
	 * and that it needs to restart the entire ioctl (since most likely
	 * the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG 1
#define I915_WEDGED 0xffffffff
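	/*
	 * Illustrative checks built on the encoding above (not necessarily
	 * the driver's canonical helpers):
	 *
	 *	reset in progress: atomic_read(&error->reset_counter) &
	 *			   I915_RESET_IN_PROGRESS_FLAG
	 *	terminally hung:   atomic_read(&error->reset_counter) ==
	 *			   I915_WEDGED
	 *
	 * Since I915_WEDGED is all-ones, a wedged GPU also reads as "reset
	 * in progress", which is exactly the invariant noted above.
	 */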

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];


	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;
	u32 gt_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	int num_plane;

	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* backlight */
	struct {
		int level;
		bool enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	} backlight;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	/* Haswell power well */
	struct i915_power_well power_well;

	enum no_psr_reason no_psr_reason;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
	} wm;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;
} drm_i915_private_t;

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}
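/*
 * Typical use, e.g. at the top of a function that only has a drm_device
 * (surrounding function hypothetical):
 *
 *	struct drm_i915_private *dev_priv = to_i915(dev);
 */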

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
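/*
 * Sketch of a for_each_ring() walk (the loop body is hypothetical); the
 * macro skips rings that intel_ring_initialized() reports as unused:
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG_DRIVER("ring %d initialised\n", i);
 */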

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF, /* force turn off HDMI audio */
	HDMI_AUDIO_AUTO, /* trust EDID */
	HDMI_AUDIO_ON, /* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once, prior to the use of the associated set
	 * of pages and before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	/** This object's place on the active/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

1298 /**
Chris Wilson65ce3022012-07-20 12:41:02 +01001299 * This is set if the object is on the active lists (has pending
1300	 * rendering and so a non-zero seqno), and is not set if it is on
1301	 * the inactive (ready to be unbound) list.
Eric Anholt673a3942008-07-30 12:06:12 -07001302 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001303 unsigned int active:1;
Eric Anholt673a3942008-07-30 12:06:12 -07001304
1305 /**
1306 * This is set if the object has been written to since last bound
1307 * to the GTT
1308 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001309 unsigned int dirty:1;
Daniel Vetter778c3542010-05-13 11:49:44 +02001310
1311 /**
1312 * Fence register bits (if any) for this object. Will be set
1313 * as needed when mapped into the GTT.
1314 * Protected by dev->struct_mutex.
Daniel Vetter778c3542010-05-13 11:49:44 +02001315 */
Daniel Vetter4b9de732011-10-09 21:52:02 +02001316 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
Daniel Vetter778c3542010-05-13 11:49:44 +02001317
1318 /**
Daniel Vetter778c3542010-05-13 11:49:44 +02001319 * Advice: are the backing pages purgeable?
1320 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001321 unsigned int madv:2;
Daniel Vetter778c3542010-05-13 11:49:44 +02001322
1323 /**
Daniel Vetter778c3542010-05-13 11:49:44 +02001324 * Current tiling mode for the object.
1325 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001326 unsigned int tiling_mode:2;
Chris Wilson5d82e3e2012-04-21 16:23:23 +01001327 /**
1328 * Whether the tiling parameters for the currently associated fence
1329 * register have changed. Note that for the purposes of tracking
1330 * tiling changes we also treat the unfenced register, the register
1331 * slot that the object occupies whilst it executes a fenced
1332 * command (such as BLT on gen2/3), as a "fence".
1333 */
1334 unsigned int fence_dirty:1;
Daniel Vetter778c3542010-05-13 11:49:44 +02001335
1336 /** How many users have pinned this object in GTT space. The following
1337 * users can each hold at most one reference: pwrite/pread, pin_ioctl
1338 * (via user_pin_count), execbuffer (objects are not allowed multiple
1339 * times for the same batchbuffer), and the framebuffer code. When
1340 * switching/pageflipping, the framebuffer code has at most two buffers
1341 * pinned per crtc.
1342 *
1343 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
1344 * bits with absolutely no headroom. So use 4 bits. */
Akshay Joshi0206e352011-08-16 15:34:10 -04001345 unsigned int pin_count:4;
Daniel Vetter778c3542010-05-13 11:49:44 +02001346#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
Eric Anholt673a3942008-07-30 12:06:12 -07001347
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001348 /**
Daniel Vetter75e9e912010-11-04 17:11:09 +01001349 * Is the object at the current location in the gtt mappable and
1350 * fenceable? Used to avoid costly recalculations.
1351 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001352 unsigned int map_and_fenceable:1;
Daniel Vetter75e9e912010-11-04 17:11:09 +01001353
1354 /**
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001355 * Whether the current gtt mapping needs to be mappable (and isn't just
1356	 * mappable by accident). Track pin and fault separately for a more
1357 * accurate mappable working set.
1358 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001359 unsigned int fault_mappable:1;
1360 unsigned int pin_mappable:1;
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001361
Chris Wilsoncaea7472010-11-12 13:53:37 +00001362 /*
1363	 * Is the GPU currently using a fence to access this buffer?
1364 */
1365 unsigned int pending_fenced_gpu_access:1;
1366 unsigned int fenced_gpu_access:1;
1367
Chris Wilson93dfb402011-03-29 16:59:50 -07001368 unsigned int cache_level:2;
1369
Daniel Vetter7bddb012012-02-09 17:15:47 +01001370 unsigned int has_aliasing_ppgtt_mapping:1;
Daniel Vetter74898d72012-02-15 23:50:22 +01001371 unsigned int has_global_gtt_mapping:1;
Chris Wilson9da3da62012-06-01 15:20:22 +01001372 unsigned int has_dma_mapping:1;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001373
Chris Wilson9da3da62012-06-01 15:20:22 +01001374 struct sg_table *pages;
Chris Wilsona5570172012-09-04 21:02:54 +01001375 int pages_pin_count;
Eric Anholt673a3942008-07-30 12:06:12 -07001376
Daniel Vetter1286ff72012-05-10 15:25:09 +02001377 /* prime dma-buf support */
Dave Airlie9a70cc22012-05-22 13:09:21 +01001378 void *dma_buf_vmapping;
1379 int vmapping_count;
1380
Daniel Vetter185cbcb2010-11-06 12:12:35 +01001381 /**
Chris Wilson67731b82010-12-08 10:38:14 +00001382 * Used for performing relocations during execbuffer insertion.
1383 */
1384 struct hlist_node exec_node;
1385 unsigned long exec_handle;
Chris Wilson6fe4f142011-01-10 17:35:37 +00001386 struct drm_i915_gem_exec_object2 *exec_entry;
Chris Wilson67731b82010-12-08 10:38:14 +00001387
Chris Wilsoncaea7472010-11-12 13:53:37 +00001388 struct intel_ring_buffer *ring;
1389
Chris Wilson1c293ea2012-04-17 15:31:27 +01001390 /** Breadcrumb of last rendering to the buffer. */
Chris Wilson0201f1e2012-07-20 12:41:01 +01001391 uint32_t last_read_seqno;
1392 uint32_t last_write_seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001393 /** Breadcrumb of last fenced GPU access to the buffer. */
1394 uint32_t last_fenced_seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001395
Daniel Vetter778c3542010-05-13 11:49:44 +02001396 /** Current tiling stride for the object, if it's tiled. */
Jesse Barnesde151cf2008-11-12 10:03:55 -08001397 uint32_t stride;
Eric Anholt673a3942008-07-30 12:06:12 -07001398
Eric Anholt280b7132009-03-12 16:56:27 -07001399 /** Record of address bit 17 of each page at last unbind. */
Chris Wilsond312ec22010-06-06 15:40:22 +01001400 unsigned long *bit_17;
Eric Anholt280b7132009-03-12 16:56:27 -07001401
Jesse Barnes79e53942008-11-07 14:24:08 -08001402 /** User space pin count and filp owning the pin */
1403 uint32_t user_pin_count;
1404 struct drm_file *pin_filp;
Dave Airlie71acb5e2008-12-30 20:31:46 +10001405
1406 /** for phy allocated objects */
1407 struct drm_i915_gem_phys_object *phys_obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001408};
Daniel Vetterb45305f2012-12-17 16:21:27 +01001409#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
Eric Anholt673a3942008-07-30 12:06:12 -07001410
Daniel Vetter62b8b212010-04-09 19:05:08 +00001411#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
Daniel Vetter23010e42010-03-08 13:35:02 +01001412
Eric Anholt673a3942008-07-30 12:06:12 -07001413/**
1414 * Request queue structure.
1415 *
1416 * The request queue allows us to note sequence numbers that have been emitted
1417 * and may be associated with active buffers to be retired.
1418 *
1419 * By keeping this list, we can avoid having to do questionable
1420 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1421 * an emission time with seqnos for tracking how far ahead of the GPU we are.
1422 */
1423struct drm_i915_gem_request {
Zou Nan hai852835f2010-05-21 09:08:56 +08001424	/** On which ring this request was generated */
1425 struct intel_ring_buffer *ring;
1426
Eric Anholt673a3942008-07-30 12:06:12 -07001427 /** GEM sequence number associated with this request. */
1428 uint32_t seqno;
1429
Mika Kuoppala7d736f42013-06-12 15:01:39 +03001430 /** Position in the ringbuffer of the start of the request */
1431 u32 head;
1432
1433 /** Position in the ringbuffer of the end of the request */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001434 u32 tail;
1435
Mika Kuoppala0e50e962013-05-02 16:48:08 +03001436 /** Context related to this request */
1437 struct i915_hw_context *ctx;
1438
Mika Kuoppala7d736f42013-06-12 15:01:39 +03001439 /** Batch buffer related to this request if any */
1440 struct drm_i915_gem_object *batch_obj;
1441
Eric Anholt673a3942008-07-30 12:06:12 -07001442 /** Time at which this request was emitted, in jiffies. */
1443 unsigned long emitted_jiffies;
1444
Eric Anholtb9624422009-06-03 07:27:35 +00001445 /** global list entry for this request */
Eric Anholt673a3942008-07-30 12:06:12 -07001446 struct list_head list;
Eric Anholtb9624422009-06-03 07:27:35 +00001447
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001448 struct drm_i915_file_private *file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001449 /** file_priv list entry for this request */
1450 struct list_head client_list;
Eric Anholt673a3942008-07-30 12:06:12 -07001451};
1452
1453struct drm_i915_file_private {
1454 struct {
Luis R. Rodriguez99057c82012-11-29 12:45:06 -08001455 spinlock_t lock;
Eric Anholtb9624422009-06-03 07:27:35 +00001456 struct list_head request_list;
Eric Anholt673a3942008-07-30 12:06:12 -07001457 } mm;
Ben Widawsky40521052012-06-04 14:42:43 -07001458 struct idr context_idr;
Mika Kuoppalae59ec132013-06-12 12:35:28 +03001459
1460 struct i915_ctx_hang_stats hang_stats;
Eric Anholt673a3942008-07-30 12:06:12 -07001461};
1462
Chris Wilson2c1792a2013-08-01 18:39:55 +01001463#define INTEL_INFO(dev) (to_i915(dev)->info)
Zou Nan haicae58522010-11-09 17:17:32 +08001464
1465#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1466#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1467#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1468#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1469#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1470#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1471#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1472#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1473#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1474#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1475#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1476#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1477#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1478#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1479#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1480#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1481#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1482#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
Jesse Barnes4b651772011-04-28 14:33:09 -07001483#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
Jesse Barnes8ab43972012-10-25 12:15:42 -07001484#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1485 (dev)->pci_device == 0x0152 || \
1486 (dev)->pci_device == 0x015a)
Daniel Vetter6547fbd2012-12-14 23:38:29 +01001487#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1488 (dev)->pci_device == 0x0106 || \
1489 (dev)->pci_device == 0x010A)
Jesse Barnes70a3eb72012-03-28 13:39:21 -07001490#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
Eugeni Dodonov4cae9ae2012-03-29 12:32:18 -03001491#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
Zou Nan haicae58522010-11-09 17:17:32 +08001492#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
Paulo Zanonid567b072012-11-20 13:27:43 -02001493#define IS_ULT(dev) (IS_HASWELL(dev) && \
1494 ((dev)->pci_device & 0xFF00) == 0x0A00)
Zou Nan haicae58522010-11-09 17:17:32 +08001495
Jesse Barnes85436692011-04-06 12:11:14 -07001496/*
1497 * The genX designation typically refers to the render engine, so render
1498 * capability related checks should use IS_GEN, while display and other checks
1499 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
1500 * chips, etc.).
1501 */
Zou Nan haicae58522010-11-09 17:17:32 +08001502#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1503#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1504#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1505#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1506#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
Jesse Barnes85436692011-04-06 12:11:14 -07001507#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
Zou Nan haicae58522010-11-09 17:17:32 +08001508
1509#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1510#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
Xiang, Haihaof72a1182013-05-28 19:22:22 -07001511#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02001512#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
Zou Nan haicae58522010-11-09 17:17:32 +08001513#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1514
Ben Widawsky254f9652012-06-04 14:42:42 -07001515#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
Jesse Barnes93553602012-06-15 11:55:23 -07001516#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001517
Chris Wilson05394f32010-11-08 19:18:58 +00001518#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
Zou Nan haicae58522010-11-09 17:17:32 +08001519#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1520
Daniel Vetterb45305f2012-12-17 16:21:27 +01001521/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1522#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1523
Zou Nan haicae58522010-11-09 17:17:32 +08001524/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1525 * rows, which changed the alignment requirements and fence programming.
1526 */
1527#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1528 IS_I915GM(dev)))
1529#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1530#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1531#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1532#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1533#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1534#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1535/* dsparb controlled by hw only */
1536#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1537
1538#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1539#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1540#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
Zou Nan haicae58522010-11-09 17:17:32 +08001541
Damien Lespiauf5adf942013-06-24 18:29:34 +01001542#define HAS_IPS(dev) (IS_ULT(dev))
1543
Jesse Barneseceae482011-04-06 12:15:08 -07001544#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
Zou Nan haicae58522010-11-09 17:17:32 +08001545
Damien Lespiaudd93be52013-04-22 18:40:39 +01001546#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
Paulo Zanoni86d52df2013-03-06 20:03:18 -03001547#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
Damien Lespiau30568c42013-04-22 18:40:41 +01001548#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
Paulo Zanoniaffa9352012-11-23 15:30:39 -02001549
Paulo Zanoni17a303e2012-11-20 15:12:07 -02001550#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1551#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1552#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1553#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1554#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1555#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1556
Chris Wilson2c1792a2013-08-01 18:39:55 +01001557#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
Eugeni Dodonoveb877eb2012-03-29 12:32:20 -03001558#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
Zou Nan haicae58522010-11-09 17:17:32 +08001559#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1560#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
Ben Widawsky40c7ead2013-04-05 13:12:40 -07001561#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
Paulo Zanoni45e6e3a2012-07-03 15:57:32 -03001562#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
Zou Nan haicae58522010-11-09 17:17:32 +08001563
Daniel Vetterb7884eb2012-06-04 11:18:15 +02001564#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1565
Ben Widawskyf27b9262012-07-24 20:47:32 -07001566#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
Ben Widawskye1ef7cc2012-07-24 20:47:31 -07001567
Ben Widawskyc8735b02012-09-07 19:43:39 -07001568#define GT_FREQUENCY_MULTIPLIER 50
1569
Chris Wilson05394f32010-11-08 19:18:58 +00001570#include "i915_trace.h"
1571
Eugeni Dodonov83b7f9a2012-03-23 11:57:18 -03001572/**
1573 * RC6 is a special power stage which allows the GPU to enter a very
1574 * low-voltage mode when idle, using down to 0V while at this stage. This
1575 * stage is entered automatically when the GPU is idle and RC6 support is
1576 * enabled; as soon as a new workload arises, the GPU wakes up automatically as well.
1577 *
1578 * There are different RC6 modes available on Intel GPUs, which differ
1579 * from each other in the latency required to enter and leave RC6, and in
1580 * the voltage consumed by the GPU in different states.
1581 *
1582 * The combination of the following flags defines which states the GPU is
1583 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
1584 * RC6pp is the deepest RC6. Their support by hardware varies according to
1585 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
1586 * the one which brings the most power savings; deeper states save more
1587 * power, but require higher latency to switch to and wake up.
1588 */
1589#define INTEL_RC6_ENABLE (1<<0)
1590#define INTEL_RC6p_ENABLE (1<<1)
1591#define INTEL_RC6pp_ENABLE (1<<2)
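/*
 * Combination example (illustrative only): a policy allowing normal and
 * deep RC6 but not the deepest state would be expressed as
 *
 *	int rc6_mode = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * with individual states then tested via e.g. (rc6_mode & INTEL_RC6pp_ENABLE).
 */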
1592
Eric Anholtc153f452007-09-03 12:06:45 +10001593extern struct drm_ioctl_desc i915_ioctls[];
Dave Airlieb3a83632005-09-30 18:37:36 +10001594extern int i915_max_ioctl;
Ben Widawskya35d9d32011-07-13 14:38:17 -07001595extern unsigned int i915_fbpercrtc __always_unused;
1596extern int i915_panel_ignore_lid __read_mostly;
1597extern unsigned int i915_powersave __read_mostly;
Eugeni Dodonovf45b5552011-12-09 17:16:37 -08001598extern int i915_semaphores __read_mostly;
Ben Widawskya35d9d32011-07-13 14:38:17 -07001599extern unsigned int i915_lvds_downclock __read_mostly;
Takashi Iwai121d5272012-03-20 13:07:06 +01001600extern int i915_lvds_channel_mode __read_mostly;
Keith Packard4415e632011-11-09 09:57:50 -08001601extern int i915_panel_use_ssc __read_mostly;
Ben Widawskya35d9d32011-07-13 14:38:17 -07001602extern int i915_vbt_sdvo_panel_type __read_mostly;
Keith Packardc0f372b32011-11-16 22:24:52 -08001603extern int i915_enable_rc6 __read_mostly;
Keith Packard4415e632011-11-09 09:57:50 -08001604extern int i915_enable_fbc __read_mostly;
Ben Widawskya35d9d32011-07-13 14:38:17 -07001605extern bool i915_enable_hangcheck __read_mostly;
Daniel Vetter650dc072012-04-02 10:08:35 +02001606extern int i915_enable_ppgtt __read_mostly;
Rodrigo Vivi105b7c12013-07-11 18:45:02 -03001607extern int i915_enable_psr __read_mostly;
Rodrigo Vivi0a3af262012-10-15 17:16:23 -03001608extern unsigned int i915_preliminary_hw_support __read_mostly;
Paulo Zanoni2124b722013-03-22 14:07:23 -03001609extern int i915_disable_power_well __read_mostly;
Paulo Zanoni3c4ca582013-05-31 16:33:23 -03001610extern int i915_enable_ips __read_mostly;
Jesse Barnes2385bdf2013-06-26 01:38:15 +03001611extern bool i915_fastboot __read_mostly;
Xiong Zhang0b74b502013-07-19 13:51:24 +08001612extern bool i915_prefault_disable __read_mostly;
Dave Airlieb3a83632005-09-30 18:37:36 +10001613
Dave Airlie6a9ee8a2010-02-01 15:38:10 +10001614extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1615extern int i915_resume(struct drm_device *dev);
Dave Airlie7c1c2872008-11-28 14:22:24 +10001616extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1617extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1618
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 /* i915_dma.c */
Daniel Vetterd05c6172012-04-26 23:28:09 +02001620void i915_update_dri1_breadcrumb(struct drm_device *dev);
Dave Airlie84b1fd12007-07-11 15:53:27 +10001621extern void i915_kernel_lost_context(struct drm_device * dev);
Dave Airlie22eae942005-11-10 22:16:34 +11001622extern int i915_driver_load(struct drm_device *, unsigned long flags);
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001623extern int i915_driver_unload(struct drm_device *);
Eric Anholt673a3942008-07-30 12:06:12 -07001624extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
Dave Airlie84b1fd12007-07-11 15:53:27 +10001625extern void i915_driver_lastclose(struct drm_device * dev);
Eric Anholt6c340ea2007-08-25 20:23:09 +10001626extern void i915_driver_preclose(struct drm_device *dev,
1627 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07001628extern void i915_driver_postclose(struct drm_device *dev,
1629 struct drm_file *file_priv);
Dave Airlie84b1fd12007-07-11 15:53:27 +10001630extern int i915_driver_device_is_agp(struct drm_device * dev);
Ben Widawskyc43b5632012-04-16 14:07:40 -07001631#ifdef CONFIG_COMPAT
Dave Airlie0d6aa602006-01-02 20:14:23 +11001632extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1633 unsigned long arg);
Ben Widawskyc43b5632012-04-16 14:07:40 -07001634#endif
Eric Anholt673a3942008-07-30 12:06:12 -07001635extern int i915_emit_box(struct drm_device *dev,
Chris Wilsonc4e7a412010-11-30 14:10:25 +00001636 struct drm_clip_rect *box,
1637 int DR1, int DR4);
Ben Widawsky8e96d9c2012-06-04 14:42:56 -07001638extern int intel_gpu_reset(struct drm_device *dev);
Daniel Vetterd4b8bb22012-04-27 15:17:44 +02001639extern int i915_reset(struct drm_device *dev);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001640extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1641extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1642extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1643extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1644
Jesse Barnes073f34d2012-11-02 11:13:59 -07001645extern void intel_console_resume(struct work_struct *work);
Dave Airlieaf6061a2008-05-07 12:15:39 +10001646
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647/* i915_irq.c */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03001648void i915_queue_hangcheck(struct drm_device *dev);
Ben Gamarif65d9422009-09-14 17:48:44 -04001649void i915_hangcheck_elapsed(unsigned long data);
Chris Wilson527f9e92010-11-11 01:16:58 +00001650void i915_handle_error(struct drm_device *dev, bool wedged);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001652extern void intel_irq_init(struct drm_device *dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01001653extern void intel_hpd_init(struct drm_device *dev);
Chris Wilson907b28c2013-07-19 20:36:52 +01001654extern void intel_pm_init(struct drm_device *dev);
1655
1656extern void intel_uncore_sanitize(struct drm_device *dev);
1657extern void intel_uncore_early_sanitize(struct drm_device *dev);
1658extern void intel_uncore_init(struct drm_device *dev);
1659extern void intel_uncore_reset(struct drm_device *dev);
1660extern void intel_uncore_clear_errors(struct drm_device *dev);
1661extern void intel_uncore_check_errors(struct drm_device *dev);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001662
Keith Packard7c463582008-11-04 02:03:27 -08001663void
1664i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1665
1666void
1667i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1668
Eric Anholt673a3942008-07-30 12:06:12 -07001669/* i915_gem.c */
1670int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1671 struct drm_file *file_priv);
1672int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1673 struct drm_file *file_priv);
1674int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1675 struct drm_file *file_priv);
1676int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1677 struct drm_file *file_priv);
1678int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1679 struct drm_file *file_priv);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001680int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1681 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07001682int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1683 struct drm_file *file_priv);
1684int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1685 struct drm_file *file_priv);
1686int i915_gem_execbuffer(struct drm_device *dev, void *data,
1687 struct drm_file *file_priv);
Jesse Barnes76446ca2009-12-17 22:05:42 -05001688int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1689 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07001690int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1691 struct drm_file *file_priv);
1692int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1693 struct drm_file *file_priv);
1694int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1695 struct drm_file *file_priv);
Ben Widawsky199adf42012-09-21 17:01:20 -07001696int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1697 struct drm_file *file);
1698int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1699 struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07001700int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1701 struct drm_file *file_priv);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001702int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1703 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07001704int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1705 struct drm_file *file_priv);
1706int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1707 struct drm_file *file_priv);
1708int i915_gem_set_tiling(struct drm_device *dev, void *data,
1709 struct drm_file *file_priv);
1710int i915_gem_get_tiling(struct drm_device *dev, void *data,
1711 struct drm_file *file_priv);
Eric Anholt5a125c32008-10-22 21:40:13 -07001712int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1713 struct drm_file *file_priv);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07001714int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1715 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07001716void i915_gem_load(struct drm_device *dev);
Chris Wilson42dcedd2012-11-15 11:32:30 +00001717void *i915_gem_object_alloc(struct drm_device *dev);
1718void i915_gem_object_free(struct drm_i915_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001719int i915_gem_init_object(struct drm_gem_object *obj);
Chris Wilson37e680a2012-06-07 15:38:42 +01001720void i915_gem_object_init(struct drm_i915_gem_object *obj,
1721 const struct drm_i915_gem_object_ops *ops);
Chris Wilson05394f32010-11-08 19:18:58 +00001722struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1723 size_t size);
Eric Anholt673a3942008-07-30 12:06:12 -07001724void i915_gem_free_object(struct drm_gem_object *obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07001725struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
1726 struct i915_address_space *vm);
1727void i915_gem_vma_destroy(struct i915_vma *vma);
Chris Wilson42dcedd2012-11-15 11:32:30 +00001728
Chris Wilson20217462010-11-23 15:26:33 +00001729int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07001730 struct i915_address_space *vm,
Chris Wilson20217462010-11-23 15:26:33 +00001731 uint32_t alignment,
Chris Wilson86a1ee22012-08-11 15:41:04 +01001732 bool map_and_fenceable,
1733 bool nonblocking);
Chris Wilson05394f32010-11-08 19:18:58 +00001734void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
Chris Wilson20217462010-11-23 15:26:33 +00001735int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
Chris Wilsondd624af2013-01-15 12:39:35 +00001736int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001737void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001738void i915_gem_lastclose(struct drm_device *dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001739
Chris Wilson37e680a2012-06-07 15:38:42 +01001740int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01001741static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1742{
Imre Deak67d5a502013-02-18 19:28:02 +02001743 struct sg_page_iter sg_iter;
Chris Wilson1cf83782012-10-10 12:11:52 +01001744
Imre Deak67d5a502013-02-18 19:28:02 +02001745 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
Imre Deak2db76d72013-03-26 15:14:18 +02001746 return sg_page_iter_page(&sg_iter);
Imre Deak67d5a502013-02-18 19:28:02 +02001747
1748 return NULL;
Chris Wilson9da3da62012-06-01 15:20:22 +01001749}
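/*
 * Usage sketch for the helper above (illustrative only): temporary CPU
 * access to page 'n' of an object whose backing pages are populated.
 *
 *	struct page *page = i915_gem_object_get_page(obj, n);
 *	void *vaddr = kmap(page);
 *	...read or write vaddr...
 *	kunmap(page);
 */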
Chris Wilsona5570172012-09-04 21:02:54 +01001750static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1751{
1752 BUG_ON(obj->pages == NULL);
1753 obj->pages_pin_count++;
1754}
1755static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1756{
1757 BUG_ON(obj->pages_pin_count == 0);
1758 obj->pages_pin_count--;
1759}
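/*
 * The two pin helpers above are meant to be used in strict pairs around
 * any window in which obj->pages must not be reaped, e.g. (sketch):
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret == 0) {
 *		i915_gem_object_pin_pages(obj);
 *		...access obj->pages...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */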
1760
Chris Wilson54cf91d2010-11-25 18:00:26 +00001761int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
Ben Widawsky2911a352012-04-05 14:47:36 -07001762int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1763 struct intel_ring_buffer *to);
Chris Wilson54cf91d2010-11-25 18:00:26 +00001764void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00001765 struct intel_ring_buffer *ring);
Chris Wilson54cf91d2010-11-25 18:00:26 +00001766
Dave Airlieff72145b2011-02-07 12:16:14 +10001767int i915_gem_dumb_create(struct drm_file *file_priv,
1768 struct drm_device *dev,
1769 struct drm_mode_create_dumb *args);
1770int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1771 uint32_t handle, uint64_t *offset);
1772int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
Akshay Joshi0206e352011-08-16 15:34:10 -04001773 uint32_t handle);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001774/**
1775 * Returns true if seq1 is later than seq2.
1776 */
1777static inline bool
1778i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1779{
1780 return (int32_t)(seq1 - seq2) >= 0;
1781}
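/*
 * The signed subtraction makes the comparison robust against seqno
 * wraparound: e.g. i915_seqno_passed(0x00000002, 0xfffffffe) evaluates
 * (int32_t)0x00000004 >= 0 and correctly reports seq1 as later, even
 * though seq1 < seq2 when compared as unsigned values.
 */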
1782
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02001783int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1784int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
Chris Wilson06d98132012-04-17 15:31:24 +01001785int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001786int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
Chris Wilson20217462010-11-23 15:26:33 +00001787
Chris Wilson9a5a53b2012-03-22 15:10:00 +00001788static inline bool
Chris Wilson1690e1e2011-12-14 13:57:08 +01001789i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1790{
1791 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1792 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1793 dev_priv->fence_regs[obj->fence_reg].pin_count++;
Chris Wilson9a5a53b2012-03-22 15:10:00 +00001794 return true;
1795 } else
1796 return false;
Chris Wilson1690e1e2011-12-14 13:57:08 +01001797}
1798
1799static inline void
1800i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1801{
1802 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1803 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonb8c3af72013-06-12 11:29:47 +01001804 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
Chris Wilson1690e1e2011-12-14 13:57:08 +01001805 dev_priv->fence_regs[obj->fence_reg].pin_count--;
1806 }
1807}
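/*
 * Pairing sketch (illustrative only): hold the fence pin across any
 * operation that depends on the detiling fence staying programmed.
 *
 *	if (i915_gem_object_pin_fence(obj)) {
 *		...use the fenced GTT mapping...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */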
1808
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001809void i915_gem_retire_requests(struct drm_device *dev);
Chris Wilsona71d8d92012-02-15 11:25:36 +00001810void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
Daniel Vetter33196de2012-11-14 17:14:05 +01001811int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
Daniel Vetterd6b2c792012-07-04 22:54:13 +02001812 bool interruptible);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001813static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
1814{
1815 return unlikely(atomic_read(&error->reset_counter)
1816 & I915_RESET_IN_PROGRESS_FLAG);
1817}
1818
1819static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
1820{
1821 return atomic_read(&error->reset_counter) == I915_WEDGED;
1822}
Chris Wilsona71d8d92012-02-15 11:25:36 +00001823
Chris Wilson069efc12010-09-30 16:53:18 +01001824void i915_gem_reset(struct drm_device *dev);
Chris Wilson05394f32010-11-08 19:18:58 +00001825void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
Chris Wilson20217462010-11-23 15:26:33 +00001826int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1827 uint32_t read_domains,
1828 uint32_t write_domain);
Chris Wilsona8198ee2011-04-13 22:04:09 +01001829int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
Chris Wilson1070a422012-04-24 15:47:41 +01001830int __must_check i915_gem_init(struct drm_device *dev);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01001831int __must_check i915_gem_init_hw(struct drm_device *dev);
Ben Widawskyb9524a12012-05-25 16:56:24 -07001832void i915_gem_l3_remap(struct drm_device *dev);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01001833void i915_gem_init_swizzling(struct drm_device *dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08001834void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07001835int __must_check i915_gpu_idle(struct drm_device *dev);
Chris Wilson20217462010-11-23 15:26:33 +00001836int __must_check i915_gem_idle(struct drm_device *dev);
Mika Kuoppala0025c072013-06-12 12:35:30 +03001837int __i915_add_request(struct intel_ring_buffer *ring,
1838 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03001839 struct drm_i915_gem_object *batch_obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03001840 u32 *seqno);
1841#define i915_add_request(ring, seqno) \
Dan Carpenter854c94a2013-06-18 10:29:58 +03001842 __i915_add_request(ring, NULL, NULL, seqno)
Ben Widawsky199b2bc2012-05-24 15:03:11 -07001843int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1844 uint32_t seqno);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001845int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
Chris Wilson20217462010-11-23 15:26:33 +00001846int __must_check
1847i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1848 bool write);
1849int __must_check
Chris Wilsondabdfe02012-03-26 10:10:27 +02001850i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
1851int __must_check
Chris Wilson2da3b9b2011-04-14 09:41:17 +01001852i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1853 u32 alignment,
Chris Wilson20217462010-11-23 15:26:33 +00001854 struct intel_ring_buffer *pipelined);
Dave Airlie71acb5e2008-12-30 20:31:46 +10001855int i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00001856 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01001857 int id,
1858 int align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10001859void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00001860 struct drm_i915_gem_object *obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10001861void i915_gem_free_all_phys_object(struct drm_device *dev);
Chris Wilson05394f32010-11-08 19:18:58 +00001862void i915_gem_release(struct drm_device *dev, struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07001863
Chris Wilson467cffb2011-03-07 10:42:03 +00001864uint32_t
Imre Deak0fa87792013-01-07 21:47:35 +02001865i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
1866uint32_t
Imre Deakd8651102013-01-07 21:47:33 +02001867i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1868 int tiling_mode, bool fenced);
Chris Wilson467cffb2011-03-07 10:42:03 +00001869
Chris Wilsone4ffd172011-04-04 09:44:39 +01001870int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1871 enum i915_cache_level cache_level);
1872
Daniel Vetter1286ff72012-05-10 15:25:09 +02001873struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1874 struct dma_buf *dma_buf);
1875
1876struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1877 struct drm_gem_object *gem_obj, int flags);
1878
Chris Wilson19b2dbd2013-06-12 10:15:12 +01001879void i915_gem_restore_fences(struct drm_device *dev);
1880
Ben Widawskya70a3142013-07-31 16:59:56 -07001881unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
1882 struct i915_address_space *vm);
1883bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
1884bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
1885 struct i915_address_space *vm);
1886unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
1887 struct i915_address_space *vm);
1888struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
1889 struct i915_address_space *vm);
1890/* Some GGTT VM helpers */
1891#define obj_to_ggtt(obj) \
1892 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
1893static inline bool i915_is_ggtt(struct i915_address_space *vm)
1894{
1895 struct i915_address_space *ggtt =
1896 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
1897 return vm == ggtt;
1898}
1899
1900static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
1901{
1902 return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
1903}
1904
1905static inline unsigned long
1906i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
1907{
1908 return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
1909}
1910
1911static inline unsigned long
1912i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
1913{
1914 return i915_gem_obj_size(obj, obj_to_ggtt(obj));
1915}
Ben Widawskyc37e2202013-07-31 16:59:58 -07001916
1917static inline int __must_check
1918i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
1919 uint32_t alignment,
1920 bool map_and_fenceable,
1921 bool nonblocking)
1922{
1923 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
1924 map_and_fenceable, nonblocking);
1925}
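/*
 * Usage sketch (illustrative only): pin an object into the global GTT
 * with 4KiB alignment, requesting a mappable and fenceable placement.
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret == 0) {
 *		...use i915_gem_obj_ggtt_offset(obj)...
 *		i915_gem_object_unpin(obj);
 *	}
 */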
Ben Widawskya70a3142013-07-31 16:59:56 -07001926#undef obj_to_ggtt
1927
Ben Widawsky254f9652012-06-04 14:42:42 -07001928/* i915_gem_context.c */
1929void i915_gem_context_init(struct drm_device *dev);
1930void i915_gem_context_fini(struct drm_device *dev);
Ben Widawsky254f9652012-06-04 14:42:42 -07001931void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
Ben Widawskye0556842012-06-04 14:42:46 -07001932int i915_switch_context(struct intel_ring_buffer *ring,
1933 struct drm_file *file, int to_id);
Mika Kuoppaladce32712013-04-30 13:30:33 +03001934void i915_gem_context_free(struct kref *ctx_ref);
1935static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
1936{
1937 kref_get(&ctx->ref);
1938}
1939
1940static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
1941{
1942 kref_put(&ctx->ref, i915_gem_context_free);
1943}
1944
Mika Kuoppalac0bb6172013-06-12 12:35:29 +03001945struct i915_ctx_hang_stats * __must_check
Chris Wilson11fa3382013-07-03 17:22:06 +03001946i915_gem_context_get_hang_stats(struct drm_device *dev,
Mika Kuoppalac0bb6172013-06-12 12:35:29 +03001947 struct drm_file *file,
1948 u32 id);
Ben Widawsky84624812012-06-04 14:42:54 -07001949int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1950 struct drm_file *file);
1951int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1952 struct drm_file *file);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001953
Daniel Vetter76aaf222010-11-05 22:23:30 +01001954/* i915_gem_gtt.c */
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001955void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
Daniel Vetter7bddb012012-02-09 17:15:47 +01001956void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
1957 struct drm_i915_gem_object *obj,
1958 enum i915_cache_level cache_level);
1959void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
1960 struct drm_i915_gem_object *obj);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001961
Daniel Vetter76aaf222010-11-05 22:23:30 +01001962void i915_gem_restore_gtt_mappings(struct drm_device *dev);
Daniel Vetter74163902012-02-15 23:50:21 +01001963int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
1964void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
Chris Wilsone4ffd172011-04-04 09:44:39 +01001965 enum i915_cache_level cache_level);
Chris Wilson05394f32010-11-08 19:18:58 +00001966void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
Daniel Vetter74163902012-02-15 23:50:21 +01001967void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
Ben Widawskyd7e50082012-12-18 10:31:25 -08001968void i915_gem_init_global_gtt(struct drm_device *dev);
1969void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
1970 unsigned long mappable_end, unsigned long end);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001971int i915_gem_gtt_init(struct drm_device *dev);
Ben Widawskyd09105c2012-11-15 12:06:09 -08001972static inline void i915_gem_chipset_flush(struct drm_device *dev)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001973{
1974 if (INTEL_INFO(dev)->gen < 6)
1975 intel_gtt_chipset_flush();
1976}
1977
Daniel Vetter76aaf222010-11-05 22:23:30 +01001978
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01001979/* i915_gem_evict.c */
Chris Wilson20217462010-11-23 15:26:33 +00001980int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
Chris Wilson42d6ab42012-07-26 11:49:32 +01001981 unsigned alignment,
1982 unsigned cache_level,
Chris Wilson86a1ee22012-08-11 15:41:04 +01001983 bool mappable,
1984 bool nonblock);
Chris Wilson6c085a72012-08-20 11:40:46 +02001985int i915_gem_evict_everything(struct drm_device *dev);
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01001986
Chris Wilson9797fbf2012-04-24 15:47:39 +01001987/* i915_gem_stolen.c */
1988int i915_gem_init_stolen(struct drm_device *dev);
Chris Wilson11be49e2012-11-15 11:32:20 +00001989int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
1990void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
Chris Wilson9797fbf2012-04-24 15:47:39 +01001991void i915_gem_cleanup_stolen(struct drm_device *dev);
Chris Wilson0104fdb2012-11-15 11:32:26 +00001992struct drm_i915_gem_object *
1993i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
Chris Wilson866d12b2013-02-19 13:31:37 -08001994struct drm_i915_gem_object *
1995i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
1996 u32 stolen_offset,
1997 u32 gtt_offset,
1998 u32 size);
Chris Wilson0104fdb2012-11-15 11:32:26 +00001999void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
Chris Wilson9797fbf2012-04-24 15:47:39 +01002000
Eric Anholt673a3942008-07-30 12:06:12 -07002001/* i915_gem_tiling.c */
Chris Wilson2c1792a2013-08-01 18:39:55 +01002002static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
Chris Wilsone9b73c62012-12-03 21:03:14 +00002003{
2004 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2005
2006 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2007 obj->tiling_mode != I915_TILING_NONE;
2008}
2009
Eric Anholt673a3942008-07-30 12:06:12 -07002010void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
Chris Wilson05394f32010-11-08 19:18:58 +00002011void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
2012void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002013
2014/* i915_gem_debug.c */
Chris Wilson05394f32010-11-08 19:18:58 +00002015void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
Eric Anholt673a3942008-07-30 12:06:12 -07002016 const char *where, uint32_t mark);
Chris Wilson23bc5982010-09-29 16:10:57 +01002017#if WATCH_LISTS
2018int i915_verify_lists(struct drm_device *dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002019#else
Chris Wilson23bc5982010-09-29 16:10:57 +01002020#define i915_verify_lists(dev) 0
Eric Anholt673a3942008-07-30 12:06:12 -07002021#endif
Chris Wilson05394f32010-11-08 19:18:58 +00002022void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
2023 int handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
Ben Gamari20172632009-02-17 20:08:50 -05002027/* i915_debugfs.c */
Ben Gamari27c202a2009-07-01 22:26:52 -04002028int i915_debugfs_init(struct drm_minor *minor);
2029void i915_debugfs_cleanup(struct drm_minor *minor);
Mika Kuoppala84734a02013-07-12 16:50:57 +03002030
2031/* i915_gpu_error.c */
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03002032__printf(2, 3)
2033void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
Mika Kuoppalafc16b482013-06-06 15:18:39 +03002034int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2035 const struct i915_error_state_file_priv *error);
Mika Kuoppala4dc955f2013-06-06 15:18:41 +03002036int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2037 size_t count, loff_t pos);
2038static inline void i915_error_state_buf_release(
2039 struct drm_i915_error_state_buf *eb)
2040{
2041 kfree(eb->buf);
2042}
Mika Kuoppala84734a02013-07-12 16:50:57 +03002043void i915_capture_error_state(struct drm_device *dev);
2044void i915_error_state_get(struct drm_device *dev,
2045 struct i915_error_state_file_priv *error_priv);
2046void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2047void i915_destroy_error_state(struct drm_device *dev);
2048
2049void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2050const char *i915_cache_level_str(int type);
Ben Gamari20172632009-02-17 20:08:50 -05002051
Jesse Barnes317c35d2008-08-25 15:11:06 -07002052/* i915_suspend.c */
2053extern int i915_save_state(struct drm_device *dev);
2054extern int i915_restore_state(struct drm_device *dev);
2055
Daniel Vetterd8157a32013-01-25 17:53:20 +01002056/* i915_ums.c */
2057void i915_save_display_reg(struct drm_device *dev);
2058void i915_restore_display_reg(struct drm_device *dev);
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002059
Ben Widawsky0136db582012-04-10 21:17:01 -07002060/* i915_sysfs.c */
2061void i915_setup_sysfs(struct drm_device *dev_priv);
2062void i915_teardown_sysfs(struct drm_device *dev_priv);
2063
Chris Wilsonf899fc62010-07-20 15:44:45 -07002064/* intel_i2c.c */
2065extern int intel_setup_gmbus(struct drm_device *dev);
2066extern void intel_teardown_gmbus(struct drm_device *dev);
Jan-Simon Möller8f375e12013-05-06 14:52:08 +02002067static inline bool intel_gmbus_is_port_valid(unsigned port)
Daniel Kurtz3bd7d902012-03-28 02:36:14 +08002068{
Daniel Kurtz2ed06c92012-03-28 02:36:15 +08002069 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
Daniel Kurtz3bd7d902012-03-28 02:36:14 +08002070}
2071
2072extern struct i2c_adapter *intel_gmbus_get_adapter(
2073 struct drm_i915_private *dev_priv, unsigned port);
Chris Wilsone957d772010-09-24 12:52:03 +01002074extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
2075extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
Jan-Simon Möller8f375e12013-05-06 14:52:08 +02002076static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
Chris Wilsonb8232e92010-09-28 16:41:32 +01002077{
2078 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
2079}
Chris Wilsonf899fc62010-07-20 15:44:45 -07002080extern void intel_i2c_reset(struct drm_device *dev);
2081
Chris Wilson3b617962010-08-24 09:02:58 +01002082/* intel_opregion.c */
Chris Wilson44834a62010-08-19 16:09:23 +01002083extern int intel_opregion_setup(struct drm_device *dev);
2084#ifdef CONFIG_ACPI
2085extern void intel_opregion_init(struct drm_device *dev);
2086extern void intel_opregion_fini(struct drm_device *dev);
Chris Wilson3b617962010-08-24 09:02:58 +01002087extern void intel_opregion_asle_intr(struct drm_device *dev);
Len Brown65e082c2008-10-24 17:18:10 -04002088#else
Chris Wilson44834a62010-08-19 16:09:23 +01002089static inline void intel_opregion_init(struct drm_device *dev) { return; }
2090static inline void intel_opregion_fini(struct drm_device *dev) { return; }
Chris Wilson3b617962010-08-24 09:02:58 +01002091static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
Len Brown65e082c2008-10-24 17:18:10 -04002092#endif
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +01002093
Jesse Barnes723bfd72010-10-07 16:01:13 -07002094/* intel_acpi.c */
2095#ifdef CONFIG_ACPI
2096extern void intel_register_dsm_handler(void);
2097extern void intel_unregister_dsm_handler(void);
2098#else
2099static inline void intel_register_dsm_handler(void) { return; }
2100static inline void intel_unregister_dsm_handler(void) { return; }
2101#endif /* CONFIG_ACPI */
2102
Jesse Barnes79e53942008-11-07 14:24:08 -08002103/* modesetting */
Daniel Vetterf8175862012-04-10 15:50:11 +02002104extern void intel_modeset_init_hw(struct drm_device *dev);
Imre Deak7d708ee2013-04-17 14:04:50 +03002105extern void intel_modeset_suspend_hw(struct drm_device *dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08002106extern void intel_modeset_init(struct drm_device *dev);
Chris Wilson2c7111d2011-03-29 10:40:27 +01002107extern void intel_modeset_gem_init(struct drm_device *dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08002108extern void intel_modeset_cleanup(struct drm_device *dev);
Dave Airlie28d52042009-09-21 14:33:58 +10002109extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
Daniel Vetter45e2b5f2012-11-23 18:16:34 +01002110extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2111 bool force_restore);
Daniel Vetter44cec742013-01-25 17:53:21 +01002112extern void i915_redisable_vga(struct drm_device *dev);
Adam Jacksonee5382a2010-04-23 11:17:39 -04002113extern bool intel_fbc_enabled(struct drm_device *dev);
Chris Wilson43a95392011-07-08 12:22:36 +01002114extern void intel_disable_fbc(struct drm_device *dev);
Jesse Barnes7648fa92010-05-20 14:28:11 -07002115extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
Paulo Zanonidde86e22012-12-01 12:04:25 -02002116extern void intel_init_pch_refclk(struct drm_device *dev);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08002117extern void gen6_set_rps(struct drm_device *dev, u8 val);
Jesse Barnes0a073b82013-04-17 15:54:58 -07002118extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2119extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
2120extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
Akshay Joshi0206e352011-08-16 15:34:10 -04002121extern void intel_detect_pch(struct drm_device *dev);
2122extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
Ben Widawsky0136db582012-04-10 21:17:01 -07002123extern int intel_enable_rc6(const struct drm_device *dev);
Zhenyu Wang3bad0782010-04-07 16:15:53 +08002124
Ben Widawsky2911a352012-04-05 14:47:36 -07002125extern bool i915_semaphore_is_enabled(struct drm_device *dev);
Ben Widawskyc0c7bab2012-07-12 11:01:05 -07002126int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2127 struct drm_file *file);
Jesse Barnes575155a2012-03-28 13:39:37 -07002128
Chris Wilson6ef3d422010-08-04 20:26:07 +01002129/* overlay */
2130extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03002131extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
2132 struct intel_overlay_error_state *error);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +00002133
2134extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03002135extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +00002136 struct drm_device *dev,
2137 struct intel_display_error_state *error);
Chris Wilson6ef3d422010-08-04 20:26:07 +01002138
Ben Widawskyb7287d82011-04-25 11:22:22 -07002139/* On the SNB platform, the forcewake bit must be set before reading ring
2140 * registers to prevent the GT core from powering down and stale values
2141 * being returned.
2142 */
Ben Widawskyfcca7922011-04-25 11:23:07 -07002143void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
2144void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
Ben Widawskyb7287d82011-04-25 11:22:22 -07002145
Ben Widawsky42c05262012-09-26 10:34:00 -07002146int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2147int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
Jani Nikula59de0812013-05-22 15:36:16 +03002148
2149/* intel_sideband.c */
Jani Nikula64936252013-05-22 15:36:20 +03002150u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
2151void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
2152u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
Jani Nikulaae992582013-05-22 15:36:19 +03002153u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
2154void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
Jani Nikula59de0812013-05-22 15:36:16 +03002155u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
2156 enum intel_sbi_destination destination);
2157void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
2158 enum intel_sbi_destination destination);
Jesse Barnes0a073b82013-04-17 15:54:58 -07002159
Jesse Barnes855ba3b2013-04-17 15:54:57 -07002160int vlv_gpu_freq(int ddr_freq, int val);
2161int vlv_freq_opcode(int ddr_freq, int val);
Ben Widawsky42c05262012-09-26 10:34:00 -07002162
Chris Wilson6af5d922013-07-19 20:36:53 +01002163#define __i915_read(x) \
Chris Wilsondba8e412013-07-19 20:36:54 +01002164 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
Chris Wilson6af5d922013-07-19 20:36:53 +01002165__i915_read(8)
2166__i915_read(16)
2167__i915_read(32)
2168__i915_read(64)
Keith Packard5f753772010-11-22 09:24:22 +00002169#undef __i915_read
2170
Chris Wilson6af5d922013-07-19 20:36:53 +01002171#define __i915_write(x) \
Chris Wilsondba8e412013-07-19 20:36:54 +01002172 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
Chris Wilson6af5d922013-07-19 20:36:53 +01002173__i915_write(8)
2174__i915_write(16)
2175__i915_write(32)
2176__i915_write(64)
Keith Packard5f753772010-11-22 09:24:22 +00002177#undef __i915_write
2178
Chris Wilsondba8e412013-07-19 20:36:54 +01002179#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
2180#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
Keith Packard5f753772010-11-22 09:24:22 +00002181
Chris Wilsondba8e412013-07-19 20:36:54 +01002182#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
2183#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
2184#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
2185#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
Keith Packard5f753772010-11-22 09:24:22 +00002186
Chris Wilsondba8e412013-07-19 20:36:54 +01002187#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
2188#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
2189#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
2190#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
Keith Packard5f753772010-11-22 09:24:22 +00002191
Chris Wilsondba8e412013-07-19 20:36:54 +01002192#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
2193#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
Zou Nan haicae58522010-11-09 17:17:32 +08002194
2195#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2196#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
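/*
 * Usage sketch (illustrative only): a read-modify-write followed by a
 * posting read to flush the write before timing-sensitive work; 'reg'
 * and 'bit' are placeholders rather than real register definitions.
 *
 *	I915_WRITE(reg, I915_READ(reg) | bit);
 *	POSTING_READ(reg);
 *	udelay(10);
 */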
2197
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02002198/* "Broadcast RGB" property */
2199#define INTEL_BROADCAST_RGB_AUTO 0
2200#define INTEL_BROADCAST_RGB_FULL 1
2201#define INTEL_BROADCAST_RGB_LIMITED 2
Yuanhan Liuba4f01a2010-11-08 17:09:41 +08002202
Ville Syrjälä766aa1c2013-01-25 21:44:46 +02002203static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2204{
2205 if (HAS_PCH_SPLIT(dev))
2206 return CPU_VGACNTRL;
2207 else if (IS_VALLEYVIEW(dev))
2208 return VLV_VGACNTRL;
2209 else
2210 return VGACNTRL;
2211}
2212
Ville Syrjälä2bb46292013-02-22 16:12:51 +02002213static inline void __user *to_user_ptr(u64 address)
2214{
2215 return (void __user *)(uintptr_t)address;
2216}
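/*
 * Usage sketch (illustrative only): turning a 64-bit ioctl argument into
 * a user pointer for copy_from_user(); 'args' stands in for an ioctl
 * argument struct carrying a u64 'data_ptr' field.
 *
 *	if (copy_from_user(buf, to_user_ptr(args->data_ptr), args->size))
 *		return -EFAULT;
 */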
2217
Imre Deakdf977292013-05-21 20:03:17 +03002218static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
2219{
2220 unsigned long j = msecs_to_jiffies(m);
2221
2222 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2223}
2224
2225static inline unsigned long
2226timespec_to_jiffies_timeout(const struct timespec *value)
2227{
2228 unsigned long j = timespec_to_jiffies(value);
2229
2230 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2231}
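/*
 * The "+ 1" in both helpers above rounds the conversion up by a whole
 * jiffy, so a caller sleeping on the result waits at least the requested
 * wall time: with HZ=1000, msecs_to_jiffies_timeout(1) returns 2, and
 * two ticks cover the full 1ms even if the first tick fires immediately.
 */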
2232
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233#endif