/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')
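/*
 * The *_name() helpers above rely on the enums starting at 0: adding 'A'
 * turns an index into a printable letter.  A minimal illustration
 * (hypothetical debug snippet, not part of the driver):
 *
 *	enum pipe p = PIPE_B;
 *	enum port q = PORT_C;
 *	DRM_DEBUG_KMS("pipe %c, port %c\n", pipe_name(p), port_name(q));
 *	// logs "pipe B, port C"
 */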
93
Chon Ming Leee4607fc2013-11-06 14:36:35 +080094#define I915_NUM_PHYS_VLV 1
95
96enum dpio_channel {
97 DPIO_CH0,
98 DPIO_CH1
99};
100
101enum dpio_phy {
102 DPIO_PHY0,
103 DPIO_PHY1
104};
105
Paulo Zanonib97186f2013-05-03 12:15:36 -0300106enum intel_display_power_domain {
107 POWER_DOMAIN_PIPE_A,
108 POWER_DOMAIN_PIPE_B,
109 POWER_DOMAIN_PIPE_C,
110 POWER_DOMAIN_PIPE_A_PANEL_FITTER,
111 POWER_DOMAIN_PIPE_B_PANEL_FITTER,
112 POWER_DOMAIN_PIPE_C_PANEL_FITTER,
113 POWER_DOMAIN_TRANSCODER_A,
114 POWER_DOMAIN_TRANSCODER_B,
115 POWER_DOMAIN_TRANSCODER_C,
Imre Deakf52e3532013-10-16 17:25:48 +0300116 POWER_DOMAIN_TRANSCODER_EDP,
Ville Syrjäläcdf8dd72013-09-16 17:38:30 +0300117 POWER_DOMAIN_VGA,
Imre Deakfbeeaa22013-11-25 17:15:28 +0200118 POWER_DOMAIN_AUDIO,
Imre Deakbaa70702013-10-25 17:36:48 +0300119 POWER_DOMAIN_INIT,
Imre Deakbddc7642013-10-16 17:25:49 +0300120
121 POWER_DOMAIN_NUM,
Paulo Zanonib97186f2013-05-03 12:15:36 -0300122};
123
Imre Deakbddc7642013-10-16 17:25:49 +0300124#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
125
Paulo Zanonib97186f2013-05-03 12:15:36 -0300126#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
127#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
128 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
Imre Deakf52e3532013-10-16 17:25:48 +0300129#define POWER_DOMAIN_TRANSCODER(tran) \
130 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
131 (tran) + POWER_DOMAIN_TRANSCODER_A)
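/*
 * Editorial example of the mapping macros above: since the power domain
 * enum mirrors the pipe/transcoder ordering, POWER_DOMAIN_PIPE(PIPE_B)
 * expands to POWER_DOMAIN_PIPE_B and POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_C)
 * to POWER_DOMAIN_PIPE_C_PANEL_FITTER.  TRANSCODER_EDP is handled
 * explicitly instead of relying on the arithmetic offset.
 */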

#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |	\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))
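/*
 * Typical use of the iterators above (hypothetical sketch; the real
 * callers live in the modesetting code and run with the appropriate
 * mode_config locking held):
 *
 *	struct intel_encoder *encoder;
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder) {
 *		// visits every encoder currently wired to this crtc
 *	}
 *
 * Both macros are a list_for_each_entry() walk over the mode_config lists
 * combined with an if-filter, so a dangling "else" placed right after the
 * loop body would bind to that hidden if.
 */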

struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
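/*
 * Hypothetical call computing DP link M/N values (sketch only; the real
 * call sites are in the DP/FDI code, and the clocks are assumed to be in
 * kHz as elsewhere in the driver):
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 *	// 24 bpp, 4 lanes, 148.5 MHz pixel clock, 270 MHz link clock
 */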
219
Paulo Zanoni6441ab52012-10-05 12:05:58 -0300220struct intel_ddi_plls {
221 int spll_refcount;
222 int wrpll1_refcount;
223 int wrpll2_refcount;
224};
225
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226/* Interface history:
227 *
228 * 1.1: Original.
Dave Airlie0d6aa602006-01-02 20:14:23 +1100229 * 1.2: Add Power Management
230 * 1.3: Add vblank support
Dave Airliede227f52006-01-25 15:31:43 +1100231 * 1.4: Fix cmdbuffer path, add heap destroy
Dave Airlie702880f2006-06-24 17:07:34 +1000232 * 1.5: Add vblank pipe configuration
=?utf-8?q?Michel_D=C3=A4nzer?=2228ed62006-10-25 01:05:09 +1000233 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
234 * - Support vertical blank on secondary display pipe
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 */
236#define DRIVER_MAJOR 1
=?utf-8?q?Michel_D=C3=A4nzer?=2228ed62006-10-25 01:05:09 +1000237#define DRIVER_MINOR 6
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238#define DRIVER_PATCHLEVEL 0
239
Chris Wilson23bc5982010-09-29 16:10:57 +0100240#define WATCH_LISTS 0
Chris Wilson42d6ab42012-07-26 11:49:32 +0100241#define WATCH_GTT 0
Eric Anholt673a3942008-07-30 12:06:12 -0700242
Dave Airlie71acb5e2008-12-30 20:31:46 +1000243#define I915_GEM_PHYS_CURSOR_0 1
244#define I915_GEM_PHYS_CURSOR_1 2
245#define I915_GEM_PHYS_OVERLAY_REGS 3
246#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
247
248struct drm_i915_gem_phys_object {
249 int id;
250 struct page **page_list;
251 drm_dma_handle_t *handle;
Chris Wilson05394f32010-11-08 19:18:58 +0000252 struct drm_i915_gem_object *cur_obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +1000253};
254
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700255struct opregion_header;
256struct opregion_acpi;
257struct opregion_swsci;
258struct opregion_asle;
259
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +0100260struct intel_opregion {
Ben Widawsky5bc44182012-04-16 14:07:42 -0700261 struct opregion_header __iomem *header;
262 struct opregion_acpi __iomem *acpi;
263 struct opregion_swsci __iomem *swsci;
Jani Nikulaebde53c2013-09-02 10:38:59 +0300264 u32 swsci_gbda_sub_functions;
265 u32 swsci_sbcb_sub_functions;
Ben Widawsky5bc44182012-04-16 14:07:42 -0700266 struct opregion_asle __iomem *asle;
267 void __iomem *vbt;
Chris Wilson01fe9db2011-01-16 19:37:30 +0000268 u32 __iomem *lid_state;
Jani Nikula91a60f22013-10-31 18:55:48 +0200269 struct work_struct asle_work;
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +0100270};
Chris Wilson44834a62010-08-19 16:09:23 +0100271#define OPREGION_SIZE (8*1024)
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +0100272
Chris Wilson6ef3d422010-08-04 20:26:07 +0100273struct intel_overlay;
274struct intel_overlay_error_state;
275
Dave Airlie7c1c2872008-11-28 14:22:24 +1000276struct drm_i915_master_private {
277 drm_local_map_t *sarea;
278 struct _drm_i915_sarea *sarea_priv;
279};
Jesse Barnesde151cf2008-11-12 10:03:55 -0800280#define I915_FENCE_REG_NONE -1
Ville Syrjälä42b5aea2013-04-09 13:02:47 +0300281#define I915_MAX_NUM_FENCES 32
282/* 32 fences + sign bit for FENCE_REG_NONE */
283#define I915_MAX_NUM_FENCE_BITS 6
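/*
 * Editorial note: a 6-bit signed bitfield spans -32..31, which is just
 * enough for fence registers 0..31 plus I915_FENCE_REG_NONE (-1).
 */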

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 pipestat[I915_MAX_PIPES];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 acthd;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u32 fault_reg;
		u32 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
};

struct intel_connector;
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       int fw_engine);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       int fw_engine);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	unsigned fw_rendercount;
	unsigned fw_mediacount;

	struct timer_list force_wake_timer;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 num_sprites:2;
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int dpll_offsets[I915_MAX_PIPES];
	int dpll_md_offsets[I915_MAX_PIPES];
	int palette_offsets[I915_MAX_PIPES];
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
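/*
 * Editorial expansion of the x-macro above: inside intel_device_info,
 * DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON) becomes
 *
 *	u8 is_mobile:1 ; u8 is_i85x:1 ; ... ; u8 has_fpga_dbg:1
 *
 * i.e. one single-bit field per feature flag.  Other users can pass a
 * different func/sep pair, e.g. to print the flag names for debugging.
 */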

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

typedef uint32_t gen6_gtt_pte_t;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
	void (*bind_vma)(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags);
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start;	/* Start offset always 0 for dri2 */
	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid); /* Create a valid PTE */
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
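/*
 * Editorial example: with a 2 GiB global GTT and 4 KiB pages,
 * gtt_total_entries() evaluates to 2 GiB >> PAGE_SHIFT = 524288 PTEs.
 */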

#define GEN8_LEGACY_PDPS 4
struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned num_pd_entries;
	unsigned num_pd_pages; /* gen8+ */
	union {
		struct page **pt_pages;
		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
	};
	struct page *pd_pages;
	union {
		uint32_t pd_offset;
		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
	};
	union {
		dma_addr_t *pt_dma_addr;
		dma_addr_t *gen8_pt_dma_addr[4];
	};

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct intel_ring_buffer *ring,
			 bool synchronous);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *last_ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_address_space *vm;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_HIST_CTL_B;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CONTROL;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 rp1_delay;
	u8 rp0_delay;
	u8 hw_max;

	bool rp_up_masked;
	bool rp_down_masked;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	unsigned long domains;
	void *data;
	void (*set)(struct drm_i915_private *dev_priv, struct i915_power_well *power_well,
		    bool enable);
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
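	/*
	 * Editorial arithmetic: with the default 1500 ms hangcheck period the
	 * ban window is DIV_ROUND_UP(8 * 1500, 1000) = 12, i.e. roughly eight
	 * hangcheck periods expressed in seconds.
	 */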

	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;


	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit set)
	 * mean that a reset is in progress, and even values mean that the
	 * (reset_counter >> 1):th reset completed successfully.
	 *
	 * If a reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)
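	/*
	 * Hypothetical helpers showing how the encoding above is meant to be
	 * read (sketch only; the driver provides its own inline helpers for
	 * this elsewhere in this header):
	 *
	 *	static inline bool example_reset_in_progress(atomic_t *counter)
	 *	{
	 *		return atomic_read(counter) & I915_RESET_IN_PROGRESS_FLAG;
	 *	}
	 *
	 *	static inline bool example_terminally_wedged(atomic_t *counter)
	 *	{
	 *		return atomic_read(counter) & I915_WEDGED;
	 *	}
	 */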

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		u16 pwm_freq_hz;
		bool active_low_pwm;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

Paulo Zanonic67a4702013-08-19 13:18:09 -03001302/*
1303 * This struct tracks the state needed for the Package C8+ feature.
1304 *
1305 * Package states C8 and deeper are really deep PC states that can only be
1306 * reached when all the devices on the system allow it, so even if the graphics
1307 * device allows PC8+, it doesn't mean the system will actually get to these
1308 * states.
1309 *
1310 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 1311	 * is disabled and the GPU is idle. Once these conditions are met, we manually
 1312	 * take care of the remaining steps: disable the interrupts and clocks, and
 1313	 * switch the LCPLL refclk to Fclk.
1314 *
1315 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1316 * the state of some registers, so when we come back from PC8+ we need to
1317 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1318 * need to take care of the registers kept by RC6.
1319 *
1320 * The interrupt disabling is part of the requirements. We can only leave the
 1321	 * PCH HPD interrupts enabled. If we're in PC8+ and we get any other interrupt,
 1322	 * the machine can lock up.
1323 *
1324 * Ideally every piece of our code that needs PC8+ disabled would call
1325 * hsw_disable_package_c8, which would increment disable_count and prevent the
1326 * system from reaching PC8+. But we don't have a symmetric way to do this for
Paulo Zanoni86c4ec02014-02-21 13:52:24 -03001327 * everything, so we have the requirements_met variable. When we switch
1328 * requirements_met to true we decrease disable_count, and increase it in the
1329 * opposite case. The requirements_met variable is true when all the CRTCs,
1330 * encoders and the power well are disabled.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001331 *
1332 * In addition to everything, we only actually enable PC8+ if disable_count
1333 * stays at zero for at least some seconds. This is implemented with the
1334 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
1335 * consecutive times when all screens are disabled and some background app
1336 * queries the state of our connectors, or we have some application constantly
 1337	 * waking up to use the GPU. Only after the enable_work function actually
 1338	 * enables PC8+ does the "enabled" variable become true, which means that
 1339	 * it can be false even if disable_count is 0.
1340 *
1341 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
1342 * goes back to false exactly before we reenable the IRQs. We use this variable
1343 * to check if someone is trying to enable/disable IRQs while they're supposed
 1344	 * to be disabled. This shouldn't happen, and we print error messages if it
 1345	 * does; in that case we also update the variables inside struct regsave so
 1346	 * that, when we restore the IRQs, they will contain the latest expected
 1347	 * values.
1348 *
 1349	 * For more, read "Display Sequences for Package C8" in our documentation.
1350 */
1351struct i915_package_c8 {
1352 bool requirements_met;
Paulo Zanonic67a4702013-08-19 13:18:09 -03001353 bool irqs_disabled;
1354 /* Only true after the delayed work task actually enables it. */
1355 bool enabled;
1356 int disable_count;
1357 struct mutex lock;
1358 struct delayed_work enable_work;
1359
1360 struct {
1361 uint32_t deimr;
1362 uint32_t sdeimr;
1363 uint32_t gtimr;
1364 uint32_t gtier;
1365 uint32_t gen6_pmimr;
1366 } regsave;
1367};
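/*
 * Hedged sketch of the disable_count discipline described above. The real
 * hsw_enable/disable_package_c8 helpers live elsewhere in the driver; the two
 * sample_* functions below are hypothetical and only illustrate how the
 * counter is meant to be balanced under pc8->lock.
 */
static inline void sample_pc8_forbid(struct i915_package_c8 *pc8)
{
	mutex_lock(&pc8->lock);
	pc8->disable_count++;		/* one more reason PC8+ must stay off */
	mutex_unlock(&pc8->lock);
}

static inline void sample_pc8_allow(struct i915_package_c8 *pc8)
{
	mutex_lock(&pc8->lock);
	WARN_ON(pc8->disable_count == 0);
	pc8->disable_count--;		/* enable_work may re-enable PC8+ later */
	mutex_unlock(&pc8->lock);
}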
1368
Paulo Zanoni8a187452013-12-06 20:32:13 -02001369struct i915_runtime_pm {
1370 bool suspended;
1371};
1372
Daniel Vetter926321d2013-10-16 13:30:34 +02001373enum intel_pipe_crc_source {
1374 INTEL_PIPE_CRC_SOURCE_NONE,
1375 INTEL_PIPE_CRC_SOURCE_PLANE1,
1376 INTEL_PIPE_CRC_SOURCE_PLANE2,
1377 INTEL_PIPE_CRC_SOURCE_PF,
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001378 INTEL_PIPE_CRC_SOURCE_PIPE,
Daniel Vetter3d099a02013-10-16 22:55:58 +02001379 /* TV/DP on pre-gen5/vlv can't use the pipe source. */
1380 INTEL_PIPE_CRC_SOURCE_TV,
1381 INTEL_PIPE_CRC_SOURCE_DP_B,
1382 INTEL_PIPE_CRC_SOURCE_DP_C,
1383 INTEL_PIPE_CRC_SOURCE_DP_D,
Daniel Vetter46a19182013-11-01 10:50:20 +01001384 INTEL_PIPE_CRC_SOURCE_AUTO,
Daniel Vetter926321d2013-10-16 13:30:34 +02001385 INTEL_PIPE_CRC_SOURCE_MAX,
1386};
1387
Shuang He8bf1e9f2013-10-15 18:55:27 +01001388struct intel_pipe_crc_entry {
Damien Lespiauac2300d2013-10-15 18:55:30 +01001389 uint32_t frame;
Shuang He8bf1e9f2013-10-15 18:55:27 +01001390 uint32_t crc[5];
1391};
1392
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001393#define INTEL_PIPE_CRC_ENTRIES_NR 128
Shuang He8bf1e9f2013-10-15 18:55:27 +01001394struct intel_pipe_crc {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001395 spinlock_t lock;
1396 bool opened; /* exclusive access to the result file */
Damien Lespiaue5f75ac2013-10-15 18:55:34 +01001397 struct intel_pipe_crc_entry *entries;
Daniel Vetter926321d2013-10-16 13:30:34 +02001398 enum intel_pipe_crc_source source;
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001399 int head, tail;
Damien Lespiau07144422013-10-15 18:55:40 +01001400 wait_queue_head_t wq;
Shuang He8bf1e9f2013-10-15 18:55:27 +01001401};
1402
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001403typedef struct drm_i915_private {
1404 struct drm_device *dev;
Chris Wilson42dcedd2012-11-15 11:32:30 +00001405 struct kmem_cache *slab;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001406
Damien Lespiau5c969aa2014-02-07 19:12:48 +00001407 const struct intel_device_info info;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001408
1409 int relative_constants_mode;
1410
1411 void __iomem *regs;
1412
Chris Wilson907b28c2013-07-19 20:36:52 +01001413 struct intel_uncore uncore;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001414
1415 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1416
Daniel Vetter28c70f12012-12-01 13:53:45 +01001417
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001418 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1419 * controller on different i2c buses. */
1420 struct mutex gmbus_mutex;
1421
1422 /**
1423 * Base address of the gmbus and gpio block.
1424 */
1425 uint32_t gpio_mmio_base;
1426
Daniel Vetter28c70f12012-12-01 13:53:45 +01001427 wait_queue_head_t gmbus_wait_queue;
1428
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001429 struct pci_dev *bridge_dev;
1430 struct intel_ring_buffer ring[I915_NUM_RINGS];
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02001431 uint32_t last_seqno, next_seqno;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001432
1433 drm_dma_handle_t *status_page_dmah;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001434 struct resource mch_res;
1435
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001436 /* protects the irq masks */
1437 spinlock_t irq_lock;
1438
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001439 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1440 struct pm_qos_request pm_qos;
1441
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001442 /* DPIO indirect register protection */
Daniel Vetter09153002012-12-12 14:06:44 +01001443 struct mutex dpio_lock;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001444
1445 /** Cached value of IMR to avoid reads in updating the bitfield */
Ben Widawskyabd58f02013-11-02 21:07:09 -07001446 union {
1447 u32 irq_mask;
1448 u32 de_irq_mask[I915_MAX_PIPES];
1449 };
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001450 u32 gt_irq_mask;
Paulo Zanoni605cd252013-08-06 18:57:15 -03001451 u32 pm_irq_mask;
Imre Deak91d181d2014-02-10 18:42:49 +02001452 u32 pipestat_irq_mask[I915_MAX_PIPES];
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001453
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001454 struct work_struct hotplug_work;
Daniel Vetter52d7ece2012-12-01 21:03:22 +01001455 bool enable_hotplug_processing;
Egbert Eichb543fb02013-04-16 13:36:54 +02001456 struct {
1457 unsigned long hpd_last_jiffies;
1458 int hpd_cnt;
1459 enum {
1460 HPD_ENABLED = 0,
1461 HPD_DISABLED = 1,
1462 HPD_MARK_DISABLED = 2
1463 } hpd_mark;
1464 } hpd_stats[HPD_NUM_PINS];
Egbert Eich142e2392013-04-11 15:57:57 +02001465 u32 hpd_event_bits;
Egbert Eichac4c16c2013-04-16 13:36:58 +02001466 struct timer_list hotplug_reenable_timer;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001467
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -07001468 struct i915_fbc fbc;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001469 struct intel_opregion opregion;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001470 struct intel_vbt_data vbt;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001471
1472 /* overlay */
1473 struct intel_overlay *overlay;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001474
Jani Nikula58c68772013-11-08 16:48:54 +02001475 /* backlight registers and fields in struct intel_panel */
1476 spinlock_t backlight_lock;
Jani Nikula31ad8ec2013-04-02 15:48:09 +03001477
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001478 /* LVDS info */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001479 bool no_aux_handshake;
1480
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001481 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1482 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1483 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1484
1485 unsigned int fsb_freq, mem_freq, is_ddr3;
1486
Daniel Vetter645416f2013-09-02 16:22:25 +02001487 /**
1488 * wq - Driver workqueue for GEM.
1489 *
1490 * NOTE: Work items scheduled here are not allowed to grab any modeset
1491 * locks, for otherwise the flushing done in the pageflip code will
1492 * result in deadlocks.
1493 */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001494 struct workqueue_struct *wq;
1495
1496 /* Display functions */
1497 struct drm_i915_display_funcs display;
1498
1499 /* PCH chipset type */
1500 enum intel_pch pch_type;
Paulo Zanoni17a303e2012-11-20 15:12:07 -02001501 unsigned short pch_id;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001502
1503 unsigned long quirks;
1504
Zhang Ruib8efb172013-02-05 15:41:53 +08001505 enum modeset_restore modeset_restore;
1506 struct mutex modeset_restore_lock;
Eric Anholt673a3942008-07-30 12:06:12 -07001507
Ben Widawskya7bbbd62013-07-16 16:50:07 -07001508 struct list_head vm_list; /* Global list of all address spaces */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001509 struct i915_gtt gtt; /* VMA representing the global address space */
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001510
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001511 struct i915_gem_mm mm;
Daniel Vetter87813422012-05-02 11:49:32 +02001512
Daniel Vetter87813422012-05-02 11:49:32 +02001513 /* Kernel Modesetting */
1514
yakui_zhao9b9d1722009-05-31 17:17:17 +08001515 struct sdvo_device_mapping sdvo_mappings[2];
Jesse Barnes652c3932009-08-17 13:31:43 -07001516
Damien Lespiau76c4ac02014-02-07 19:12:52 +00001517 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1518 struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05001519 wait_queue_head_t pending_flip_queue;
1520
Daniel Vetterc4597872013-10-21 21:04:07 +02001521#ifdef CONFIG_DEBUG_FS
1522 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1523#endif
1524
Daniel Vettere72f9fb2013-06-05 13:34:06 +02001525 int num_shared_dpll;
1526 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
Paulo Zanoni6441ab52012-10-05 12:05:58 -03001527 struct intel_ddi_plls ddi_plls;
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001528 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
Jesse Barnesee7b9f92012-04-20 17:11:53 +01001529
Jesse Barnes652c3932009-08-17 13:31:43 -07001530 /* Reclocking support */
1531 bool render_reclock_avail;
1532 bool lvds_downclock_avail;
Zhao Yakui18f9ed12009-11-20 03:24:16 +00001533 /* indicates the reduced downclock for LVDS*/
1534 int lvds_downclock;
Jesse Barnes652c3932009-08-17 13:31:43 -07001535 u16 orig_clock;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001536
Zhenyu Wangc48044112009-12-17 14:48:43 +08001537 bool mchbar_need_disable;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001538
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001539 struct intel_l3_parity l3_parity;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001540
Ben Widawsky59124502013-07-04 11:02:05 -07001541 /* Cannot be determined by PCIID. You must always read a register. */
1542 size_t ellc_size;
1543
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001544 /* gen6+ rps state */
Daniel Vetterc85aa882012-11-02 19:55:03 +01001545 struct intel_gen6_power_mgmt rps;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001546
Daniel Vetter20e4d402012-08-08 23:35:39 +02001547 /* ilk-only ips/rps state. Everything in here is protected by the global
1548 * mchdev_lock in intel_pm.c */
Daniel Vetterc85aa882012-11-02 19:55:03 +01001549 struct intel_ilk_power_mgmt ips;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001550
Imre Deak83c00f552013-10-25 17:36:47 +03001551 struct i915_power_domains power_domains;
Wang Xingchaoa38911a2013-05-30 22:07:11 +08001552
Rodrigo Vivia031d702013-10-03 16:15:06 -03001553 struct i915_psr psr;
Rodrigo Vivi3f51e472013-07-11 18:45:00 -03001554
Daniel Vetter99584db2012-11-14 17:14:04 +01001555 struct i915_gpu_error gpu_error;
Chris Wilsonae681d92010-10-01 14:57:56 +01001556
Jesse Barnesc9cddff2013-05-08 10:45:13 -07001557 struct drm_i915_gem_object *vlv_pctx;
1558
Daniel Vetter4520f532013-10-09 09:18:51 +02001559#ifdef CONFIG_DRM_I915_FBDEV
Dave Airlie8be48d92010-03-30 05:34:14 +00001560 /* list of fbdev register on this device */
1561 struct intel_fbdev *fbdev;
Daniel Vetter4520f532013-10-09 09:18:51 +02001562#endif
Chris Wilsone953fd72011-02-21 22:23:52 +00001563
Jesse Barnes073f34d2012-11-02 11:13:59 -07001564 /*
1565 * The console may be contended at resume, but we don't
1566 * want it to block on it.
1567 */
1568 struct work_struct console_resume_work;
1569
Chris Wilsone953fd72011-02-21 22:23:52 +00001570 struct drm_property *broadcast_rgb_property;
Chris Wilson3f43c482011-05-12 22:17:24 +01001571 struct drm_property *force_audio_property;
Ben Widawskye3689192012-05-25 16:56:22 -07001572
Ben Widawsky254f9652012-06-04 14:42:42 -07001573 uint32_t hw_context_size;
Ben Widawskya33afea2013-09-17 21:12:45 -07001574 struct list_head context_list;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001575
Damien Lespiau3e683202012-12-11 18:48:29 +00001576 u32 fdi_rx_config;
Paulo Zanoni68d18ad2012-12-01 12:04:26 -02001577
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001578 struct i915_suspend_saved_registers regfile;
Daniel Vetter231f42a2012-11-02 19:55:05 +01001579
Ville Syrjälä53615a52013-08-01 16:18:50 +03001580 struct {
1581 /*
1582 * Raw watermark latency values:
1583 * in 0.1us units for WM0,
1584 * in 0.5us units for WM1+.
1585 */
1586 /* primary */
1587 uint16_t pri_latency[5];
1588 /* sprite */
1589 uint16_t spr_latency[5];
1590 /* cursor */
1591 uint16_t cur_latency[5];
Ville Syrjälä609cede2013-10-09 19:18:03 +03001592
1593 /* current hardware state */
Imre Deak820c1982013-12-17 14:46:36 +02001594 struct ilk_wm_values hw;
Ville Syrjälä53615a52013-08-01 16:18:50 +03001595 } wm;
1596
Paulo Zanonic67a4702013-08-19 13:18:09 -03001597 struct i915_package_c8 pc8;
1598
Paulo Zanoni8a187452013-12-06 20:32:13 -02001599 struct i915_runtime_pm pm;
1600
Daniel Vetter231f42a2012-11-02 19:55:05 +01001601 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1602 * here! */
1603 struct i915_dri1_state dri1;
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02001604 /* Old ums support infrastructure, same warning applies. */
1605 struct i915_ums_state ums;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606} drm_i915_private_t;
1607
Chris Wilson2c1792a2013-08-01 18:39:55 +01001608static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1609{
1610 return dev->dev_private;
1611}
1612
Chris Wilsonb4519512012-05-11 14:29:30 +01001613/* Iterate over initialised rings */
1614#define for_each_ring(ring__, dev_priv__, i__) \
1615 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
1616 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
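/*
 * Typical use of the iterator above (illustrative only); the body only runs
 * for rings that intel_ring_initialized() reports as set up:
 *
 *	struct intel_ring_buffer *ring;
 *	int i, active = 0;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		active++;
 */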
1617
Wu Fengguangb1d7e4b2012-02-14 11:45:36 +08001618enum hdmi_force_audio {
1619 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
1620 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
1621 HDMI_AUDIO_AUTO, /* trust EDID */
1622 HDMI_AUDIO_ON, /* force turn on HDMI audio */
1623};
1624
Daniel Vetter190d6cd2013-07-04 13:06:28 +02001625#define I915_GTT_OFFSET_NONE ((u32)-1)
Chris Wilsoned2f3452012-11-15 11:32:19 +00001626
Chris Wilson37e680a2012-06-07 15:38:42 +01001627struct drm_i915_gem_object_ops {
1628 /* Interface between the GEM object and its backing storage.
 1629	 * get_pages() is called once prior to the use of the associated set
 1630	 * of pages before binding them into the GTT, and put_pages() is
1631 * called after we no longer need them. As we expect there to be
1632 * associated cost with migrating pages between the backing storage
1633 * and making them available for the GPU (e.g. clflush), we may hold
1634 * onto the pages after they are no longer referenced by the GPU
1635 * in case they may be used again shortly (for example migrating the
1636 * pages to a different memory domain within the GTT). put_pages()
1637 * will therefore most likely be called when the object itself is
1638 * being released or under memory pressure (where we attempt to
1639 * reap pages for the shrinker).
1640 */
1641 int (*get_pages)(struct drm_i915_gem_object *);
1642 void (*put_pages)(struct drm_i915_gem_object *);
1643};
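/*
 * Minimal sketch of the contract documented above, using hypothetical
 * callbacks: get_pages() makes the backing pages available (returning 0 on
 * success) and put_pages() releases them when the object is reaped or under
 * memory pressure. This is illustration only, not one of the driver's real
 * ops tables.
 */
static inline int sample_get_pages(struct drm_i915_gem_object *obj)
{
	/* allocate or look up the backing store, then publish it via obj */
	return 0;
}

static inline void sample_put_pages(struct drm_i915_gem_object *obj)
{
	/* drop the backing store; pages may be reacquired by get_pages() */
}

static const struct drm_i915_gem_object_ops sample_gem_object_ops = {
	.get_pages = sample_get_pages,
	.put_pages = sample_put_pages,
};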
1644
Eric Anholt673a3942008-07-30 12:06:12 -07001645struct drm_i915_gem_object {
Daniel Vetterc397b902010-04-09 19:05:07 +00001646 struct drm_gem_object base;
Eric Anholt673a3942008-07-30 12:06:12 -07001647
Chris Wilson37e680a2012-06-07 15:38:42 +01001648 const struct drm_i915_gem_object_ops *ops;
1649
Ben Widawsky2f633152013-07-17 12:19:03 -07001650 /** List of VMAs backed by this object */
1651 struct list_head vma_list;
1652
Chris Wilsonc1ad11f2012-11-15 11:32:21 +00001653 /** Stolen memory for this object, instead of being backed by shmem. */
1654 struct drm_mm_node *stolen;
Ben Widawsky35c20a62013-05-31 11:28:48 -07001655 struct list_head global_list;
Eric Anholt673a3942008-07-30 12:06:12 -07001656
Chris Wilson69dc4982010-10-19 10:36:51 +01001657 struct list_head ring_list;
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02001658 /** Used in execbuf to temporarily hold a ref */
1659 struct list_head obj_exec_link;
Eric Anholt673a3942008-07-30 12:06:12 -07001660
1661 /**
Chris Wilson65ce3022012-07-20 12:41:02 +01001662 * This is set if the object is on the active lists (has pending
 1663	 * rendering and so a non-zero seqno), and is not set if it is on
1664 * inactive (ready to be unbound) list.
Eric Anholt673a3942008-07-30 12:06:12 -07001665 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001666 unsigned int active:1;
Eric Anholt673a3942008-07-30 12:06:12 -07001667
1668 /**
1669 * This is set if the object has been written to since last bound
1670 * to the GTT
1671 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001672 unsigned int dirty:1;
Daniel Vetter778c3542010-05-13 11:49:44 +02001673
1674 /**
1675 * Fence register bits (if any) for this object. Will be set
1676 * as needed when mapped into the GTT.
1677 * Protected by dev->struct_mutex.
Daniel Vetter778c3542010-05-13 11:49:44 +02001678 */
Daniel Vetter4b9de732011-10-09 21:52:02 +02001679 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
Daniel Vetter778c3542010-05-13 11:49:44 +02001680
1681 /**
Daniel Vetter778c3542010-05-13 11:49:44 +02001682 * Advice: are the backing pages purgeable?
1683 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001684 unsigned int madv:2;
Daniel Vetter778c3542010-05-13 11:49:44 +02001685
1686 /**
Daniel Vetter778c3542010-05-13 11:49:44 +02001687 * Current tiling mode for the object.
1688 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001689 unsigned int tiling_mode:2;
Chris Wilson5d82e3e2012-04-21 16:23:23 +01001690 /**
1691 * Whether the tiling parameters for the currently associated fence
1692 * register have changed. Note that for the purposes of tracking
1693 * tiling changes we also treat the unfenced register, the register
1694 * slot that the object occupies whilst it executes a fenced
1695 * command (such as BLT on gen2/3), as a "fence".
1696 */
1697 unsigned int fence_dirty:1;
Daniel Vetter778c3542010-05-13 11:49:44 +02001698
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001699 /**
Daniel Vetter75e9e912010-11-04 17:11:09 +01001700 * Is the object at the current location in the gtt mappable and
1701 * fenceable? Used to avoid costly recalculations.
1702 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001703 unsigned int map_and_fenceable:1;
Daniel Vetter75e9e912010-11-04 17:11:09 +01001704
1705 /**
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001706 * Whether the current gtt mapping needs to be mappable (and isn't just
1707 * mappable by accident). Track pin and fault separate for a more
1708 * accurate mappable working set.
1709 */
Akshay Joshi0206e352011-08-16 15:34:10 -04001710 unsigned int fault_mappable:1;
1711 unsigned int pin_mappable:1;
Chris Wilsoncc98b412013-08-09 12:25:09 +01001712 unsigned int pin_display:1;
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001713
Chris Wilsoncaea7472010-11-12 13:53:37 +00001714 /*
1715 * Is the GPU currently using a fence to access this buffer,
1716 */
1717 unsigned int pending_fenced_gpu_access:1;
1718 unsigned int fenced_gpu_access:1;
1719
Chris Wilson651d7942013-08-08 14:41:10 +01001720 unsigned int cache_level:3;
Chris Wilson93dfb402011-03-29 16:59:50 -07001721
Daniel Vetter7bddb012012-02-09 17:15:47 +01001722 unsigned int has_aliasing_ppgtt_mapping:1;
Daniel Vetter74898d72012-02-15 23:50:22 +01001723 unsigned int has_global_gtt_mapping:1;
Chris Wilson9da3da62012-06-01 15:20:22 +01001724 unsigned int has_dma_mapping:1;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001725
Chris Wilson9da3da62012-06-01 15:20:22 +01001726 struct sg_table *pages;
Chris Wilsona5570172012-09-04 21:02:54 +01001727 int pages_pin_count;
Eric Anholt673a3942008-07-30 12:06:12 -07001728
Daniel Vetter1286ff72012-05-10 15:25:09 +02001729 /* prime dma-buf support */
Dave Airlie9a70cc22012-05-22 13:09:21 +01001730 void *dma_buf_vmapping;
1731 int vmapping_count;
1732
Chris Wilsoncaea7472010-11-12 13:53:37 +00001733 struct intel_ring_buffer *ring;
1734
Chris Wilson1c293ea2012-04-17 15:31:27 +01001735 /** Breadcrumb of last rendering to the buffer. */
Chris Wilson0201f1e2012-07-20 12:41:01 +01001736 uint32_t last_read_seqno;
1737 uint32_t last_write_seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001738 /** Breadcrumb of last fenced GPU access to the buffer. */
1739 uint32_t last_fenced_seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001740
Daniel Vetter778c3542010-05-13 11:49:44 +02001741 /** Current tiling stride for the object, if it's tiled. */
Jesse Barnesde151cf2008-11-12 10:03:55 -08001742 uint32_t stride;
Eric Anholt673a3942008-07-30 12:06:12 -07001743
Daniel Vetter80075d42013-10-09 21:23:52 +02001744 /** References from framebuffers, locks out tiling changes. */
1745 unsigned long framebuffer_references;
1746
Eric Anholt280b7132009-03-12 16:56:27 -07001747 /** Record of address bit 17 of each page at last unbind. */
Chris Wilsond312ec22010-06-06 15:40:22 +01001748 unsigned long *bit_17;
Eric Anholt280b7132009-03-12 16:56:27 -07001749
Jesse Barnes79e53942008-11-07 14:24:08 -08001750 /** User space pin count and filp owning the pin */
Daniel Vetteraa5f8022013-10-10 14:46:37 +02001751 unsigned long user_pin_count;
Jesse Barnes79e53942008-11-07 14:24:08 -08001752 struct drm_file *pin_filp;
Dave Airlie71acb5e2008-12-30 20:31:46 +10001753
1754 /** for phy allocated objects */
1755 struct drm_i915_gem_phys_object *phys_obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001756};
Daniel Vetterb45305f2012-12-17 16:21:27 +01001757#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
Eric Anholt673a3942008-07-30 12:06:12 -07001758
Daniel Vetter62b8b212010-04-09 19:05:08 +00001759#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
Daniel Vetter23010e42010-03-08 13:35:02 +01001760
Eric Anholt673a3942008-07-30 12:06:12 -07001761/**
1762 * Request queue structure.
1763 *
1764 * The request queue allows us to note sequence numbers that have been emitted
1765 * and may be associated with active buffers to be retired.
1766 *
1767 * By keeping this list, we can avoid having to do questionable
1768 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1769 * an emission time with seqnos for tracking how far ahead of the GPU we are.
1770 */
1771struct drm_i915_gem_request {
Zou Nan hai852835f2010-05-21 09:08:56 +08001772	/** On which ring this request was generated */
1773 struct intel_ring_buffer *ring;
1774
Eric Anholt673a3942008-07-30 12:06:12 -07001775 /** GEM sequence number associated with this request. */
1776 uint32_t seqno;
1777
Mika Kuoppala7d736f42013-06-12 15:01:39 +03001778 /** Position in the ringbuffer of the start of the request */
1779 u32 head;
1780
1781 /** Position in the ringbuffer of the end of the request */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001782 u32 tail;
1783
Mika Kuoppala0e50e962013-05-02 16:48:08 +03001784 /** Context related to this request */
1785 struct i915_hw_context *ctx;
1786
Mika Kuoppala7d736f42013-06-12 15:01:39 +03001787 /** Batch buffer related to this request if any */
1788 struct drm_i915_gem_object *batch_obj;
1789
Eric Anholt673a3942008-07-30 12:06:12 -07001790 /** Time at which this request was emitted, in jiffies. */
1791 unsigned long emitted_jiffies;
1792
Eric Anholtb9624422009-06-03 07:27:35 +00001793 /** global list entry for this request */
Eric Anholt673a3942008-07-30 12:06:12 -07001794 struct list_head list;
Eric Anholtb9624422009-06-03 07:27:35 +00001795
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001796 struct drm_i915_file_private *file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001797 /** file_priv list entry for this request */
1798 struct list_head client_list;
Eric Anholt673a3942008-07-30 12:06:12 -07001799};
1800
1801struct drm_i915_file_private {
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001802 struct drm_i915_private *dev_priv;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001803 struct drm_file *file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001804
Eric Anholt673a3942008-07-30 12:06:12 -07001805 struct {
Luis R. Rodriguez99057c82012-11-29 12:45:06 -08001806 spinlock_t lock;
Eric Anholtb9624422009-06-03 07:27:35 +00001807 struct list_head request_list;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001808 struct delayed_work idle_work;
Eric Anholt673a3942008-07-30 12:06:12 -07001809 } mm;
Ben Widawsky40521052012-06-04 14:42:43 -07001810 struct idr context_idr;
Mika Kuoppalae59ec132013-06-12 12:35:28 +03001811
Ben Widawsky0eea67e2013-12-06 14:11:19 -08001812 struct i915_hw_context *private_default_ctx;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001813 atomic_t rps_wait_boost;
Eric Anholt673a3942008-07-30 12:06:12 -07001814};
1815
Damien Lespiau5c969aa2014-02-07 19:12:48 +00001816#define INTEL_INFO(dev) (&to_i915(dev)->info)
Zou Nan haicae58522010-11-09 17:17:32 +08001817
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001818#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
1819#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
Zou Nan haicae58522010-11-09 17:17:32 +08001820#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001821#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
Zou Nan haicae58522010-11-09 17:17:32 +08001822#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001823#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
1824#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
Zou Nan haicae58522010-11-09 17:17:32 +08001825#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1826#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1827#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001828#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
Zou Nan haicae58522010-11-09 17:17:32 +08001829#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001830#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
1831#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
Zou Nan haicae58522010-11-09 17:17:32 +08001832#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1833#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001834#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
Jesse Barnes4b651772011-04-28 14:33:09 -07001835#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001836#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
1837 (dev)->pdev->device == 0x0152 || \
1838 (dev)->pdev->device == 0x015a)
1839#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
1840 (dev)->pdev->device == 0x0106 || \
1841 (dev)->pdev->device == 0x010A)
Jesse Barnes70a3eb72012-03-28 13:39:21 -07001842#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
Eugeni Dodonov4cae9ae2012-03-29 12:32:18 -03001843#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
Paulo Zanoni4e8058a2013-11-02 21:07:31 -07001844#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
Zou Nan haicae58522010-11-09 17:17:32 +08001845#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
Paulo Zanonied1c9e22013-08-12 14:34:08 -03001846#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001847 ((dev)->pdev->device & 0xFF00) == 0x0C00)
Ben Widawsky5dd8c4c2013-11-08 10:20:06 -08001848#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
1849 (((dev)->pdev->device & 0xf) == 0x2 || \
1850 ((dev)->pdev->device & 0xf) == 0x6 || \
1851 ((dev)->pdev->device & 0xf) == 0xe))
1852#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001853 ((dev)->pdev->device & 0xFF00) == 0x0A00)
Ben Widawsky5dd8c4c2013-11-08 10:20:06 -08001854#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
Rodrigo Vivi94353732013-08-28 16:45:46 -03001855#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
Ville Syrjäläffbab09b2013-10-04 14:53:40 +03001856 ((dev)->pdev->device & 0x00F0) == 0x0020)
Ben Widawskyb833d682013-08-23 16:00:07 -07001857#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
Zou Nan haicae58522010-11-09 17:17:32 +08001858
Jesse Barnes85436692011-04-06 12:11:14 -07001859/*
1860 * The genX designation typically refers to the render engine, so render
1861 * capability related checks should use IS_GEN, while display and other checks
1862 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
1863 * chips, etc.).
1864 */
Zou Nan haicae58522010-11-09 17:17:32 +08001865#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1866#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1867#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1868#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1869#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
Jesse Barnes85436692011-04-06 12:11:14 -07001870#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
Ben Widawskyd2980842013-11-02 21:06:59 -07001871#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
Zou Nan haicae58522010-11-09 17:17:32 +08001872
Ben Widawsky73ae4782013-10-15 10:02:57 -07001873#define RENDER_RING (1<<RCS)
1874#define BSD_RING (1<<VCS)
1875#define BLT_RING (1<<BCS)
1876#define VEBOX_RING (1<<VECS)
1877#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
1878#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
1879#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02001880#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
Chris Wilson651d7942013-08-08 14:41:10 +01001881#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
Zou Nan haicae58522010-11-09 17:17:32 +08001882#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1883
Ben Widawsky254f9652012-06-04 14:42:42 -07001884#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
Ben Widawsky246cbfb2013-12-06 14:11:14 -08001885#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
Ben Widawskyc5dc5ce2014-01-27 23:07:00 -08001886#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
1887 && !IS_BROADWELL(dev))
1888#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08001889#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001890
Chris Wilson05394f32010-11-08 19:18:58 +00001891#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
Zou Nan haicae58522010-11-09 17:17:32 +08001892#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1893
Daniel Vetterb45305f2012-12-17 16:21:27 +01001894/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1895#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1896
Zou Nan haicae58522010-11-09 17:17:32 +08001897/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1898 * rows, which changed the alignment requirements and fence programming.
1899 */
1900#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1901 IS_I915GM(dev)))
1902#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1903#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1904#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
Zou Nan haicae58522010-11-09 17:17:32 +08001905#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1906#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
Zou Nan haicae58522010-11-09 17:17:32 +08001907
1908#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1909#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01001910#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
Zou Nan haicae58522010-11-09 17:17:32 +08001911
Ben Widawsky2a114cc2013-11-02 21:07:47 -07001912#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
Damien Lespiauf5adf942013-06-24 18:29:34 +01001913
Damien Lespiaudd93be52013-04-22 18:40:39 +01001914#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
Damien Lespiau30568c42013-04-22 18:40:41 +01001915#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
Ben Widawskyed8546a2013-11-04 22:45:05 -08001916#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
Chris Wilson7c6c2652013-11-18 18:32:37 -08001917#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
Paulo Zanonidf4547d2013-12-13 15:22:32 -02001918#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
Paulo Zanoniaffa9352012-11-23 15:30:39 -02001919
Paulo Zanoni17a303e2012-11-20 15:12:07 -02001920#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1921#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1922#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1923#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1924#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1925#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1926
Chris Wilson2c1792a2013-08-01 18:39:55 +01001927#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
Eugeni Dodonoveb877eb2012-03-29 12:32:20 -03001928#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
Zou Nan haicae58522010-11-09 17:17:32 +08001929#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1930#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
Ben Widawsky40c7ead2013-04-05 13:12:40 -07001931#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
Paulo Zanoni45e6e3a2012-07-03 15:57:32 -03001932#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
Zou Nan haicae58522010-11-09 17:17:32 +08001933
Ben Widawsky040d2ba2013-09-19 11:01:40 -07001934/* DPF == dynamic parity feature */
1935#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1936#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
Ben Widawskye1ef7cc2012-07-24 20:47:31 -07001937
Ben Widawskyc8735b02012-09-07 19:43:39 -07001938#define GT_FREQUENCY_MULTIPLIER 50
1939
Chris Wilson05394f32010-11-08 19:18:58 +00001940#include "i915_trace.h"
1941
Rob Clarkbaa70942013-08-02 13:27:49 -04001942extern const struct drm_ioctl_desc i915_ioctls[];
Dave Airlieb3a83632005-09-30 18:37:36 +10001943extern int i915_max_ioctl;
1944
Dave Airlie6a9ee8a2010-02-01 15:38:10 +10001945extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1946extern int i915_resume(struct drm_device *dev);
Dave Airlie7c1c2872008-11-28 14:22:24 +10001947extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1948extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1949
Jani Nikulad330a952014-01-21 11:24:25 +02001950/* i915_params.c */
1951struct i915_params {
1952 int modeset;
1953 int panel_ignore_lid;
1954 unsigned int powersave;
1955 int semaphores;
1956 unsigned int lvds_downclock;
1957 int lvds_channel_mode;
1958 int panel_use_ssc;
1959 int vbt_sdvo_panel_type;
1960 int enable_rc6;
1961 int enable_fbc;
Jani Nikulad330a952014-01-21 11:24:25 +02001962 int enable_ppgtt;
1963 int enable_psr;
1964 unsigned int preliminary_hw_support;
1965 int disable_power_well;
1966 int enable_ips;
Jani Nikulad330a952014-01-21 11:24:25 +02001967 int enable_pc8;
1968 int pc8_timeout;
Damien Lespiaue5aa6542014-02-07 19:12:53 +00001969 int invert_brightness;
1970 /* leave bools at the end to not create holes */
1971 bool enable_hangcheck;
1972 bool fastboot;
Jani Nikulad330a952014-01-21 11:24:25 +02001973 bool prefault_disable;
1974 bool reset;
Damien Lespiaua0bae572014-02-10 17:20:55 +00001975 bool disable_display;
Jani Nikulad330a952014-01-21 11:24:25 +02001976};
1977extern struct i915_params i915 __read_mostly;
1978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 /* i915_dma.c */
Daniel Vetterd05c6172012-04-26 23:28:09 +02001980void i915_update_dri1_breadcrumb(struct drm_device *dev);
Dave Airlie84b1fd12007-07-11 15:53:27 +10001981extern void i915_kernel_lost_context(struct drm_device * dev);
Dave Airlie22eae942005-11-10 22:16:34 +11001982extern int i915_driver_load(struct drm_device *, unsigned long flags);
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001983extern int i915_driver_unload(struct drm_device *);
Eric Anholt673a3942008-07-30 12:06:12 -07001984extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
Dave Airlie84b1fd12007-07-11 15:53:27 +10001985extern void i915_driver_lastclose(struct drm_device * dev);
Eric Anholt6c340ea2007-08-25 20:23:09 +10001986extern void i915_driver_preclose(struct drm_device *dev,
1987 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07001988extern void i915_driver_postclose(struct drm_device *dev,
1989 struct drm_file *file_priv);
Dave Airlie84b1fd12007-07-11 15:53:27 +10001990extern int i915_driver_device_is_agp(struct drm_device * dev);
Ben Widawskyc43b5632012-04-16 14:07:40 -07001991#ifdef CONFIG_COMPAT
Dave Airlie0d6aa602006-01-02 20:14:23 +11001992extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1993 unsigned long arg);
Ben Widawskyc43b5632012-04-16 14:07:40 -07001994#endif
Eric Anholt673a3942008-07-30 12:06:12 -07001995extern int i915_emit_box(struct drm_device *dev,
Chris Wilsonc4e7a412010-11-30 14:10:25 +00001996 struct drm_clip_rect *box,
1997 int DR1, int DR4);
Ben Widawsky8e96d9c2012-06-04 14:42:56 -07001998extern int intel_gpu_reset(struct drm_device *dev);
Daniel Vetterd4b8bb22012-04-27 15:17:44 +02001999extern int i915_reset(struct drm_device *dev);
Jesse Barnes7648fa92010-05-20 14:28:11 -07002000extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2001extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2002extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2003extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2004
Jesse Barnes073f34d2012-11-02 11:13:59 -07002005extern void intel_console_resume(struct work_struct *work);
Dave Airlieaf6061a2008-05-07 12:15:39 +10002006
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007/* i915_irq.c */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002008void i915_queue_hangcheck(struct drm_device *dev);
Chris Wilson527f9e92010-11-11 01:16:58 +00002009void i915_handle_error(struct drm_device *dev, bool wedged);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
Deepak S76c3552f2014-01-30 23:08:16 +05302011void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
2012 int new_delay);
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002013extern void intel_irq_init(struct drm_device *dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002014extern void intel_hpd_init(struct drm_device *dev);
Chris Wilson907b28c2013-07-19 20:36:52 +01002015
2016extern void intel_uncore_sanitize(struct drm_device *dev);
2017extern void intel_uncore_early_sanitize(struct drm_device *dev);
2018extern void intel_uncore_init(struct drm_device *dev);
Chris Wilson907b28c2013-07-19 20:36:52 +01002019extern void intel_uncore_check_errors(struct drm_device *dev);
Chris Wilsonaec347a2013-08-26 13:46:09 +01002020extern void intel_uncore_fini(struct drm_device *dev);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002021
Keith Packard7c463582008-11-04 02:03:27 -08002022void
Imre Deak755e9012014-02-10 18:42:47 +02002023i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
2024 u32 status_mask);
Keith Packard7c463582008-11-04 02:03:27 -08002025
2026void
Imre Deak755e9012014-02-10 18:42:47 +02002027i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
2028 u32 status_mask);
Keith Packard7c463582008-11-04 02:03:27 -08002029
Eric Anholt673a3942008-07-30 12:06:12 -07002030/* i915_gem.c */
2031int i915_gem_init_ioctl(struct drm_device *dev, void *data,
2032 struct drm_file *file_priv);
2033int i915_gem_create_ioctl(struct drm_device *dev, void *data,
2034 struct drm_file *file_priv);
2035int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
2036 struct drm_file *file_priv);
2037int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2038 struct drm_file *file_priv);
2039int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
2040 struct drm_file *file_priv);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002041int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2042 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002043int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2044 struct drm_file *file_priv);
2045int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
2046 struct drm_file *file_priv);
2047int i915_gem_execbuffer(struct drm_device *dev, void *data,
2048 struct drm_file *file_priv);
Jesse Barnes76446ca2009-12-17 22:05:42 -05002049int i915_gem_execbuffer2(struct drm_device *dev, void *data,
2050 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002051int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2052 struct drm_file *file_priv);
2053int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2054 struct drm_file *file_priv);
2055int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2056 struct drm_file *file_priv);
Ben Widawsky199adf42012-09-21 17:01:20 -07002057int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2058 struct drm_file *file);
2059int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2060 struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07002061int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2062 struct drm_file *file_priv);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002063int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2064 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002065int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2066 struct drm_file *file_priv);
2067int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2068 struct drm_file *file_priv);
2069int i915_gem_set_tiling(struct drm_device *dev, void *data,
2070 struct drm_file *file_priv);
2071int i915_gem_get_tiling(struct drm_device *dev, void *data,
2072 struct drm_file *file_priv);
Eric Anholt5a125c32008-10-22 21:40:13 -07002073int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2074 struct drm_file *file_priv);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002075int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2076 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002077void i915_gem_load(struct drm_device *dev);
Chris Wilson42dcedd2012-11-15 11:32:30 +00002078void *i915_gem_object_alloc(struct drm_device *dev);
2079void i915_gem_object_free(struct drm_i915_gem_object *obj);
Chris Wilson37e680a2012-06-07 15:38:42 +01002080void i915_gem_object_init(struct drm_i915_gem_object *obj,
2081 const struct drm_i915_gem_object_ops *ops);
Chris Wilson05394f32010-11-08 19:18:58 +00002082struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2083 size_t size);
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08002084void i915_init_vm(struct drm_i915_private *dev_priv,
2085 struct i915_address_space *vm);
Eric Anholt673a3942008-07-30 12:06:12 -07002086void i915_gem_free_object(struct drm_gem_object *obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07002087void i915_gem_vma_destroy(struct i915_vma *vma);
Chris Wilson42dcedd2012-11-15 11:32:30 +00002088
Daniel Vetter1ec9e262014-02-14 14:01:11 +01002089#define PIN_MAPPABLE 0x1
2090#define PIN_NONBLOCK 0x2
Daniel Vetterbf3d1492014-02-14 14:01:12 +01002091#define PIN_GLOBAL 0x4
Chris Wilson20217462010-11-23 15:26:33 +00002092int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07002093 struct i915_address_space *vm,
Chris Wilson20217462010-11-23 15:26:33 +00002094 uint32_t alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01002095 unsigned flags);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002096int __must_check i915_vma_unbind(struct i915_vma *vma);
Chris Wilsondd624af2013-01-15 12:39:35 +00002097int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
Paulo Zanoni48018a52013-12-13 15:22:31 -02002098void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
Chris Wilson05394f32010-11-08 19:18:58 +00002099void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002100void i915_gem_lastclose(struct drm_device *dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002101
Chris Wilson37e680a2012-06-07 15:38:42 +01002102int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01002103static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2104{
Imre Deak67d5a502013-02-18 19:28:02 +02002105 struct sg_page_iter sg_iter;
Chris Wilson1cf83782012-10-10 12:11:52 +01002106
Imre Deak67d5a502013-02-18 19:28:02 +02002107 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
Imre Deak2db76d72013-03-26 15:14:18 +02002108 return sg_page_iter_page(&sg_iter);
Imre Deak67d5a502013-02-18 19:28:02 +02002109
2110 return NULL;
Chris Wilson9da3da62012-06-01 15:20:22 +01002111}
Chris Wilsona5570172012-09-04 21:02:54 +01002112static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
2113{
2114 BUG_ON(obj->pages == NULL);
2115 obj->pages_pin_count++;
2116}
2117static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
2118{
2119 BUG_ON(obj->pages_pin_count == 0);
2120 obj->pages_pin_count--;
2121}
2122
Chris Wilson54cf91d2010-11-25 18:00:26 +00002123int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
Ben Widawsky2911a352012-04-05 14:47:36 -07002124int i915_gem_object_sync(struct drm_i915_gem_object *obj,
2125 struct intel_ring_buffer *to);
Ben Widawskye2d05a82013-09-24 09:57:58 -07002126void i915_vma_move_to_active(struct i915_vma *vma,
2127 struct intel_ring_buffer *ring);
Dave Airlieff72145b2011-02-07 12:16:14 +10002128int i915_gem_dumb_create(struct drm_file *file_priv,
2129 struct drm_device *dev,
2130 struct drm_mode_create_dumb *args);
2131int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2132 uint32_t handle, uint64_t *offset);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002133/**
2134 * Returns true if seq1 is later than seq2.
2135 */
2136static inline bool
2137i915_seqno_passed(uint32_t seq1, uint32_t seq2)
2138{
2139 return (int32_t)(seq1 - seq2) >= 0;
2140}
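/*
 * Worked example (illustrative): the signed 32-bit subtraction keeps the
 * comparison correct across seqno wraparound, e.g.
 * i915_seqno_passed(0x00000002, 0xfffffffe) is true because
 * (int32_t)(0x00000002 - 0xfffffffe) == 4, which is >= 0.
 */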
2141
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002142int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
2143int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
Chris Wilson06d98132012-04-17 15:31:24 +01002144int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002145int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
Chris Wilson20217462010-11-23 15:26:33 +00002146
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002147static inline bool
Chris Wilson1690e1e2011-12-14 13:57:08 +01002148i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
2149{
2150 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2151 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2152 dev_priv->fence_regs[obj->fence_reg].pin_count++;
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002153 return true;
2154 } else
2155 return false;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002156}
2157
2158static inline void
2159i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
2160{
2161 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2162 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonb8c3af72013-06-12 11:29:47 +01002163 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
Chris Wilson1690e1e2011-12-14 13:57:08 +01002164 dev_priv->fence_regs[obj->fence_reg].pin_count--;
2165 }
2166}
2167
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002168struct drm_i915_gem_request *
2169i915_gem_find_active_request(struct intel_ring_buffer *ring);
2170
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002171bool i915_gem_retire_requests(struct drm_device *dev);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002172void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
Daniel Vetter33196de2012-11-14 17:14:05 +01002173int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
Daniel Vetterd6b2c792012-07-04 22:54:13 +02002174 bool interruptible);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002175static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
2176{
2177 return unlikely(atomic_read(&error->reset_counter)
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02002178 & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002179}
2180
2181static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
2182{
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02002183 return atomic_read(&error->reset_counter) & I915_WEDGED;
2184}
2185
2186static inline u32 i915_reset_count(struct i915_gpu_error *error)
2187{
2188 return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002189}
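/*
 * Note on the encoding assumed above (based on the helpers in this file):
 * reset_counter is bumped once when a reset begins, which sets
 * I915_RESET_IN_PROGRESS_FLAG in the low bit, and once more when it
 * completes, so i915_reset_count() yields the number of resets including one
 * that is currently in flight.
 */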
Chris Wilsona71d8d92012-02-15 11:25:36 +00002190
Chris Wilson069efc12010-09-30 16:53:18 +01002191void i915_gem_reset(struct drm_device *dev);
Chris Wilson000433b2013-08-08 14:41:09 +01002192bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002193int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
Chris Wilson1070a422012-04-24 15:47:41 +01002194int __must_check i915_gem_init(struct drm_device *dev);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01002195int __must_check i915_gem_init_hw(struct drm_device *dev);
Ben Widawskyc3787e22013-09-17 21:12:44 -07002196int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01002197void i915_gem_init_swizzling(struct drm_device *dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08002198void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002199int __must_check i915_gpu_idle(struct drm_device *dev);
Chris Wilson45c5f202013-10-16 11:50:01 +01002200int __must_check i915_gem_suspend(struct drm_device *dev);
Mika Kuoppala0025c072013-06-12 12:35:30 +03002201int __i915_add_request(struct intel_ring_buffer *ring,
2202 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002203 struct drm_i915_gem_object *batch_obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002204 u32 *seqno);
2205#define i915_add_request(ring, seqno) \
Dan Carpenter854c94a2013-06-18 10:29:58 +03002206 __i915_add_request(ring, NULL, NULL, seqno)
Ben Widawsky199b2bc2012-05-24 15:03:11 -07002207int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
2208 uint32_t seqno);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002209int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
Chris Wilson20217462010-11-23 15:26:33 +00002210int __must_check
2211i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
2212 bool write);
2213int __must_check
Chris Wilsondabdfe02012-03-26 10:10:27 +02002214i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2215int __must_check
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002216i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2217 u32 alignment,
Chris Wilson20217462010-11-23 15:26:33 +00002218 struct intel_ring_buffer *pipelined);
Chris Wilsoncc98b412013-08-09 12:25:09 +01002219void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10002220int i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00002221 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01002222 int id,
2223 int align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10002224void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00002225 struct drm_i915_gem_object *obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10002226void i915_gem_free_all_phys_object(struct drm_device *dev);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002227int i915_gem_open(struct drm_device *dev, struct drm_file *file);
Chris Wilson05394f32010-11-08 19:18:58 +00002228void i915_gem_release(struct drm_device *dev, struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07002229
Chris Wilson467cffb2011-03-07 10:42:03 +00002230uint32_t
Imre Deak0fa87792013-01-07 21:47:35 +02002231i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
2232uint32_t
Imre Deakd8651102013-01-07 21:47:33 +02002233i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2234 int tiling_mode, bool fenced);
Chris Wilson467cffb2011-03-07 10:42:03 +00002235
Chris Wilsone4ffd172011-04-04 09:44:39 +01002236int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2237 enum i915_cache_level cache_level);
2238
Daniel Vetter1286ff72012-05-10 15:25:09 +02002239struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
2240 struct dma_buf *dma_buf);
2241
2242struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
2243 struct drm_gem_object *gem_obj, int flags);
2244
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002245void i915_gem_restore_fences(struct drm_device *dev);
2246
Ben Widawskya70a3142013-07-31 16:59:56 -07002247unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
2248 struct i915_address_space *vm);
2249bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
2250bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
2251 struct i915_address_space *vm);
2252unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
2253 struct i915_address_space *vm);
2254struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
2255 struct i915_address_space *vm);
Ben Widawskyaccfef22013-08-14 11:38:35 +02002256struct i915_vma *
2257i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2258 struct i915_address_space *vm);
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07002259
2260struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002261static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
2262 struct i915_vma *vma;
2263 list_for_each_entry(vma, &obj->vma_list, vma_link)
2264 if (vma->pin_count > 0)
2265 return true;
2266 return false;
2267}
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07002268
Ben Widawskya70a3142013-07-31 16:59:56 -07002269/* Some GGTT VM helpers */
2270#define obj_to_ggtt(obj) \
2271 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
2272static inline bool i915_is_ggtt(struct i915_address_space *vm)
2273{
2274 struct i915_address_space *ggtt =
2275 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
2276 return vm == ggtt;
2277}
2278
2279static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2280{
2281 return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
2282}
2283
2284static inline unsigned long
2285i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
2286{
2287 return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
2288}
2289
2290static inline unsigned long
2291i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2292{
2293 return i915_gem_obj_size(obj, obj_to_ggtt(obj));
2294}
Ben Widawskyc37e2202013-07-31 16:59:58 -07002295
2296static inline int __must_check
2297i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2298 uint32_t alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01002299 unsigned flags)
Ben Widawskyc37e2202013-07-31 16:59:58 -07002300{
Daniel Vetterbf3d1492014-02-14 14:01:12 +01002301 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
Ben Widawskyc37e2202013-07-31 16:59:58 -07002302}
Ben Widawskya70a3142013-07-31 16:59:56 -07002303
Daniel Vetterb2871102014-02-14 14:01:19 +01002304static inline int
2305i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2306{
2307 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
2308}
2309
2310void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
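/*
 * Illustrative sketch only (not part of the driver API): a typical use of
 * the GGTT helpers above is to pin an object into the global GTT, query its
 * offset for programming into hardware, and unpin it afterwards.  The
 * 4096-byte alignment and the zero flags value are assumptions made for the
 * example.
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
 *	if (ret)
 *		return ret;
 *
 *	offset = i915_gem_obj_ggtt_offset(obj);
 *	... write offset into a hardware register ...
 *
 *	i915_gem_object_ggtt_unpin(obj);
 */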
2311
Ben Widawsky254f9652012-06-04 14:42:42 -07002312/* i915_gem_context.c */
Ben Widawsky0eea67e2013-12-06 14:11:19 -08002313#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
Ben Widawsky8245be32013-11-06 13:56:29 -02002314int __must_check i915_gem_context_init(struct drm_device *dev);
Ben Widawsky254f9652012-06-04 14:42:42 -07002315void i915_gem_context_fini(struct drm_device *dev);
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002316void i915_gem_context_reset(struct drm_device *dev);
Ben Widawskye422b882013-12-06 14:10:58 -08002317int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08002318int i915_gem_context_enable(struct drm_i915_private *dev_priv);
Ben Widawsky254f9652012-06-04 14:42:42 -07002319void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
Ben Widawskye0556842012-06-04 14:42:46 -07002320int i915_switch_context(struct intel_ring_buffer *ring,
Ben Widawsky41bde552013-12-06 14:11:21 -08002321 struct drm_file *file, struct i915_hw_context *to);
2322struct i915_hw_context *
2323i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
Mika Kuoppaladce32712013-04-30 13:30:33 +03002324void i915_gem_context_free(struct kref *ctx_ref);
2325static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2326{
Ben Widawskyc4829722013-12-06 14:11:20 -08002327 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
2328 kref_get(&ctx->ref);
Mika Kuoppaladce32712013-04-30 13:30:33 +03002329}
2330
2331static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2332{
Ben Widawskyc4829722013-12-06 14:11:20 -08002333 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
2334 kref_put(&ctx->ref, i915_gem_context_free);
Mika Kuoppaladce32712013-04-30 13:30:33 +03002335}
2336
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002337static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
2338{
2339 return c->id == DEFAULT_CONTEXT_ID;
2340}
2341
Ben Widawsky84624812012-06-04 14:42:54 -07002342int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2343 struct drm_file *file);
2344int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2345 struct drm_file *file);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002346
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002347/* i915_gem_evict.c */
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07002348int __must_check i915_gem_evict_something(struct drm_device *dev,
2349 struct i915_address_space *vm,
2350 int min_size,
Chris Wilson42d6ab42012-07-26 11:49:32 +01002351 unsigned alignment,
2352 unsigned cache_level,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01002353 unsigned flags);
Ben Widawsky68c8c172013-09-11 14:57:50 -07002354int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
Chris Wilson6c085a72012-08-20 11:40:46 +02002355int i915_gem_evict_everything(struct drm_device *dev);
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002356
Chris Wilson05394f32010-11-08 19:18:58 +00002357/* i915_gem_gtt.c */
2358void i915_check_and_clear_faults(struct drm_device *dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002359void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2360void i915_gem_restore_gtt_mappings(struct drm_device *dev);
Chris Wilson05394f32010-11-08 19:18:58 +00002361int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002362void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
2363void i915_gem_init_global_gtt(struct drm_device *dev);
2364void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
2365 unsigned long mappable_end, unsigned long end);
2366int i915_gem_gtt_init(struct drm_device *dev);
2367static inline void i915_gem_chipset_flush(struct drm_device *dev)
2368{
2369 if (INTEL_INFO(dev)->gen < 6)
2370 intel_gtt_chipset_flush();
Chris Wilson9797fbf2012-04-24 15:47:39 +01002371}
Ben Widawsky246cbfb2013-12-06 14:11:14 -08002372int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
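/*
 * Whether PPGTT may be used at all (aliasing mode) or as a full per-process
 * GTT, based on the i915.enable_ppgtt module parameter, hardware support
 * and VT-d state.
 */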
2373static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
2374{
Jani Nikulad330a952014-01-21 11:24:25 +02002375 if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
Ben Widawsky246cbfb2013-12-06 14:11:14 -08002376 return false;
2377
Jani Nikulad330a952014-01-21 11:24:25 +02002378 if (i915.enable_ppgtt == 1 && full)
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08002379 return false;
Ben Widawsky246cbfb2013-12-06 14:11:14 -08002380
2381#ifdef CONFIG_INTEL_IOMMU
2382 /* Disable ppgtt on SNB if VT-d is on. */
2383 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
2384 DRM_INFO("Disabling PPGTT because VT-d is on\n");
2385 return false;
2386 }
2387#endif
2388
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08002389 if (full)
2390 return HAS_PPGTT(dev);
2391 else
2392 return HAS_ALIASING_PPGTT(dev);
Ben Widawsky246cbfb2013-12-06 14:11:14 -08002393}
2394
Chris Wilson9797fbf2012-04-24 15:47:39 +01002395/* i915_gem_stolen.c */
2396int i915_gem_init_stolen(struct drm_device *dev);
Chris Wilson11be49e2012-11-15 11:32:20 +00002397int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
2398void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
Chris Wilson9797fbf2012-04-24 15:47:39 +01002399void i915_gem_cleanup_stolen(struct drm_device *dev);
Chris Wilson0104fdb2012-11-15 11:32:26 +00002400struct drm_i915_gem_object *
2401i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
Chris Wilson866d12b2013-02-19 13:31:37 -08002402struct drm_i915_gem_object *
2403i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
2404 u32 stolen_offset,
2405 u32 gtt_offset,
2406 u32 size);
Chris Wilson0104fdb2012-11-15 11:32:26 +00002407void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
Chris Wilson9797fbf2012-04-24 15:47:39 +01002408
Eric Anholt673a3942008-07-30 12:06:12 -07002409/* i915_gem_tiling.c */
Chris Wilson2c1792a2013-08-01 18:39:55 +01002410static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
Chris Wilsone9b73c62012-12-03 21:03:14 +00002411{
2412	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2413
2414 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2415 obj->tiling_mode != I915_TILING_NONE;
2416}
2417
Eric Anholt673a3942008-07-30 12:06:12 -07002418void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
2419void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
2420void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
2421
2422/* i915_gem_debug.c */
Chris Wilson23bc5982010-09-29 16:10:57 +01002423#if WATCH_LISTS
2424int i915_verify_lists(struct drm_device *dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002425#else
Chris Wilson23bc5982010-09-29 16:10:57 +01002426#define i915_verify_lists(dev) 0
Eric Anholt673a3942008-07-30 12:06:12 -07002427#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428
Ben Gamari20172632009-02-17 20:08:50 -05002429/* i915_debugfs.c */
Ben Gamari27c202a2009-07-01 22:26:52 -04002430int i915_debugfs_init(struct drm_minor *minor);
2431void i915_debugfs_cleanup(struct drm_minor *minor);
Daniel Vetterf8c168f2013-10-16 11:49:58 +02002432#ifdef CONFIG_DEBUG_FS
Damien Lespiau07144422013-10-15 18:55:40 +01002433void intel_display_crc_init(struct drm_device *dev);
2434#else
Daniel Vetterf8c168f2013-10-16 11:49:58 +02002435static inline void intel_display_crc_init(struct drm_device *dev) {}
Damien Lespiau07144422013-10-15 18:55:40 +01002436#endif
Mika Kuoppala84734a02013-07-12 16:50:57 +03002437
2438/* i915_gpu_error.c */
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03002439__printf(2, 3)
2440void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
Mika Kuoppalafc16b482013-06-06 15:18:39 +03002441int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2442 const struct i915_error_state_file_priv *error);
Mika Kuoppala4dc955f2013-06-06 15:18:41 +03002443int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2444 size_t count, loff_t pos);
2445static inline void i915_error_state_buf_release(
2446 struct drm_i915_error_state_buf *eb)
2447{
2448 kfree(eb->buf);
2449}
Mika Kuoppala84734a02013-07-12 16:50:57 +03002450void i915_capture_error_state(struct drm_device *dev);
2451void i915_error_state_get(struct drm_device *dev,
2452 struct i915_error_state_file_priv *error_priv);
2453void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2454void i915_destroy_error_state(struct drm_device *dev);
2455
2456void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2457const char *i915_cache_level_str(int type);
Ben Gamari20172632009-02-17 20:08:50 -05002458
Jesse Barnes317c35d2008-08-25 15:11:06 -07002459/* i915_suspend.c */
2460extern int i915_save_state(struct drm_device *dev);
2461extern int i915_restore_state(struct drm_device *dev);
2462
Daniel Vetterd8157a32013-01-25 17:53:20 +01002463/* i915_ums.c */
2464void i915_save_display_reg(struct drm_device *dev);
2465void i915_restore_display_reg(struct drm_device *dev);
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002466
Ben Widawsky0136db582012-04-10 21:17:01 -07002467/* i915_sysfs.c */
2468void i915_setup_sysfs(struct drm_device *dev_priv);
2469void i915_teardown_sysfs(struct drm_device *dev_priv);
2470
Chris Wilsonf899fc62010-07-20 15:44:45 -07002471/* intel_i2c.c */
2472extern int intel_setup_gmbus(struct drm_device *dev);
2473extern void intel_teardown_gmbus(struct drm_device *dev);
Jan-Simon Möller8f375e12013-05-06 14:52:08 +02002474static inline bool intel_gmbus_is_port_valid(unsigned port)
Daniel Kurtz3bd7d902012-03-28 02:36:14 +08002475{
Daniel Kurtz2ed06c92012-03-28 02:36:15 +08002476 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
Daniel Kurtz3bd7d902012-03-28 02:36:14 +08002477}
2478
2479extern struct i2c_adapter *intel_gmbus_get_adapter(
2480 struct drm_i915_private *dev_priv, unsigned port);
Chris Wilsone957d772010-09-24 12:52:03 +01002481extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
2482extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
Jan-Simon Möller8f375e12013-05-06 14:52:08 +02002483static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
Chris Wilsonb8232e92010-09-28 16:41:32 +01002484{
2485 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
2486}
Chris Wilsonf899fc62010-07-20 15:44:45 -07002487extern void intel_i2c_reset(struct drm_device *dev);
2488
Chris Wilson3b617962010-08-24 09:02:58 +01002489/* intel_opregion.c */
Jani Nikula9c4b0a62013-08-30 19:40:30 +03002490struct intel_encoder;
Chris Wilson44834a62010-08-19 16:09:23 +01002491extern int intel_opregion_setup(struct drm_device *dev);
2492#ifdef CONFIG_ACPI
2493extern void intel_opregion_init(struct drm_device *dev);
2494extern void intel_opregion_fini(struct drm_device *dev);
Chris Wilson3b617962010-08-24 09:02:58 +01002495extern void intel_opregion_asle_intr(struct drm_device *dev);
Jani Nikula9c4b0a62013-08-30 19:40:30 +03002496extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
2497 bool enable);
Jani Nikulaecbc5cf2013-08-30 19:40:31 +03002498extern int intel_opregion_notify_adapter(struct drm_device *dev,
2499 pci_power_t state);
Len Brown65e082c2008-10-24 17:18:10 -04002500#else
Chris Wilson44834a62010-08-19 16:09:23 +01002501static inline void intel_opregion_init(struct drm_device *dev) { return; }
2502static inline void intel_opregion_fini(struct drm_device *dev) { return; }
Chris Wilson3b617962010-08-24 09:02:58 +01002503static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
Jani Nikula9c4b0a62013-08-30 19:40:30 +03002504static inline int
2505intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
2506{
2507 return 0;
2508}
Jani Nikulaecbc5cf2013-08-30 19:40:31 +03002509static inline int
2510intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
2511{
2512 return 0;
2513}
Len Brown65e082c2008-10-24 17:18:10 -04002514#endif
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +01002515
Jesse Barnes723bfd72010-10-07 16:01:13 -07002516/* intel_acpi.c */
2517#ifdef CONFIG_ACPI
2518extern void intel_register_dsm_handler(void);
2519extern void intel_unregister_dsm_handler(void);
2520#else
2521static inline void intel_register_dsm_handler(void) { return; }
2522static inline void intel_unregister_dsm_handler(void) { return; }
2523#endif /* CONFIG_ACPI */
2524
Jesse Barnes79e53942008-11-07 14:24:08 -08002525/* modesetting */
Daniel Vetterf8175862012-04-10 15:50:11 +02002526extern void intel_modeset_init_hw(struct drm_device *dev);
Imre Deak7d708ee2013-04-17 14:04:50 +03002527extern void intel_modeset_suspend_hw(struct drm_device *dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08002528extern void intel_modeset_init(struct drm_device *dev);
Chris Wilson2c7111d2011-03-29 10:40:27 +01002529extern void intel_modeset_gem_init(struct drm_device *dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08002530extern void intel_modeset_cleanup(struct drm_device *dev);
Imre Deak4932e2c2014-02-11 17:12:48 +02002531extern void intel_connector_unregister(struct intel_connector *);
Dave Airlie28d52042009-09-21 14:33:58 +10002532extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
Daniel Vetter45e2b5f2012-11-23 18:16:34 +01002533extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2534 bool force_restore);
Daniel Vetter44cec742013-01-25 17:53:21 +01002535extern void i915_redisable_vga(struct drm_device *dev);
Imre Deak04098752014-02-18 00:02:16 +02002536extern void i915_redisable_vga_power_on(struct drm_device *dev);
Adam Jacksonee5382a2010-04-23 11:17:39 -04002537extern bool intel_fbc_enabled(struct drm_device *dev);
Chris Wilson43a95392011-07-08 12:22:36 +01002538extern void intel_disable_fbc(struct drm_device *dev);
Jesse Barnes7648fa92010-05-20 14:28:11 -07002539extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
Paulo Zanonidde86e22012-12-01 12:04:25 -02002540extern void intel_init_pch_refclk(struct drm_device *dev);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08002541extern void gen6_set_rps(struct drm_device *dev, u8 val);
Jesse Barnes0a073b82013-04-17 15:54:58 -07002542extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2543extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
2544extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
Akshay Joshi0206e352011-08-16 15:34:10 -04002545extern void intel_detect_pch(struct drm_device *dev);
2546extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
Ben Widawsky0136db582012-04-10 21:17:01 -07002547extern int intel_enable_rc6(const struct drm_device *dev);
Zhenyu Wang3bad0782010-04-07 16:15:53 +08002548
Ben Widawsky2911a352012-04-05 14:47:36 -07002549extern bool i915_semaphore_is_enabled(struct drm_device *dev);
Ben Widawskyc0c7bab2012-07-12 11:01:05 -07002550int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2551 struct drm_file *file);
Mika Kuoppalab6359912013-10-30 15:44:16 +02002552int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
2553 struct drm_file *file);
Jesse Barnes575155a2012-03-28 13:39:37 -07002554
Chris Wilson6ef3d422010-08-04 20:26:07 +01002555/* overlay */
2556extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03002557extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
2558 struct intel_overlay_error_state *error);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +00002559
2560extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03002561extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +00002562 struct drm_device *dev,
2563 struct intel_display_error_state *error);
Chris Wilson6ef3d422010-08-04 20:26:07 +01002564
Ben Widawskyb7287d82011-04-25 11:22:22 -07002565/* On the SNB platform, the forcewake bit must be set before reading ring
2566 * registers, to prevent the GT core from powering down and returning stale
2567 * values.
2568 */
Deepak Sc8d9a592013-11-23 14:55:42 +05302569void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2570void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
Paulo Zanonie998c402014-02-21 13:52:26 -03002571void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
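/*
 * Illustrative sketch only: a block of GT register accesses can take an
 * explicit forcewake reference so the hardware is not woken separately for
 * every access; GEN6_RPSTAT1 is just an example register here.
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	rpstat = I915_READ(GEN6_RPSTAT1);
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */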
Ben Widawskyb7287d82011-04-25 11:22:22 -07002572
Ben Widawsky42c05262012-09-26 10:34:00 -07002573int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2574int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
Jani Nikula59de0812013-05-22 15:36:16 +03002575
2576/* intel_sideband.c */
Jani Nikula64936252013-05-22 15:36:20 +03002577u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
2578void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
2579u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
Jani Nikulae9f882a2013-08-27 15:12:14 +03002580u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
2581void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2582u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
2583void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2584u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
2585void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
Jesse Barnesf3419152013-11-04 11:52:44 -08002586u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
2587void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
Jani Nikulae9f882a2013-08-27 15:12:14 +03002588u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
2589void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002590u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
2591void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
Jani Nikula59de0812013-05-22 15:36:16 +03002592u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
2593 enum intel_sbi_destination destination);
2594void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
2595 enum intel_sbi_destination destination);
Shobhit Kumare9fe51c2013-12-10 12:14:55 +05302596u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
2597void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
Jesse Barnes0a073b82013-04-17 15:54:58 -07002598
Ville Syrjälä2ec38152013-11-05 22:42:29 +02002599int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
2600int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
Ben Widawsky42c05262012-09-26 10:34:00 -07002601
Deepak S940aece2013-11-23 14:55:43 +05302602void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2603void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2604
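/*
 * MMIO offset ranges used to decide which Valleyview forcewake well (render
 * or media) needs to be woken for a given register access.
 */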
2605#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
2606 (((reg) >= 0x2000 && (reg) < 0x4000) ||\
2607 ((reg) >= 0x5000 && (reg) < 0x8000) ||\
2608 ((reg) >= 0xB000 && (reg) < 0x12000) ||\
2609 ((reg) >= 0x2E000 && (reg) < 0x30000))
2610
2611#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
2612 (((reg) >= 0x12000 && (reg) < 0x14000) ||\
2613 ((reg) >= 0x22000 && (reg) < 0x24000) ||\
2614 ((reg) >= 0x30000 && (reg) < 0x40000))
2615
Deepak Sc8d9a592013-11-23 14:55:42 +05302616#define FORCEWAKE_RENDER (1 << 0)
2617#define FORCEWAKE_MEDIA (1 << 1)
2618#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
2619
2620
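/*
 * Register accessors: these go through the per-platform uncore function
 * pointers, which handle forcewake where needed; the _NOTRACE variants pass
 * trace=false and so skip the register tracepoints.
 */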
Ben Widawsky0b274482013-10-04 21:22:51 -07002621#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
2622#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
Keith Packard5f753772010-11-22 09:24:22 +00002623
Ben Widawsky0b274482013-10-04 21:22:51 -07002624#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
2625#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
2626#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
2627#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
Keith Packard5f753772010-11-22 09:24:22 +00002628
Ben Widawsky0b274482013-10-04 21:22:51 -07002629#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
2630#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
2631#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
2632#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
Keith Packard5f753772010-11-22 09:24:22 +00002633
Ben Widawsky0b274482013-10-04 21:22:51 -07002634#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
2635#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
Zou Nan haicae58522010-11-09 17:17:32 +08002636
2637#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2638#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
2639
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02002640/* "Broadcast RGB" property */
2641#define INTEL_BROADCAST_RGB_AUTO 0
2642#define INTEL_BROADCAST_RGB_FULL 1
2643#define INTEL_BROADCAST_RGB_LIMITED 2
Yuanhan Liuba4f01a2010-11-08 17:09:41 +08002644
Ville Syrjälä766aa1c2013-01-25 21:44:46 +02002645static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2646{
2647 if (HAS_PCH_SPLIT(dev))
2648 return CPU_VGACNTRL;
2649 else if (IS_VALLEYVIEW(dev))
2650 return VLV_VGACNTRL;
2651 else
2652 return VGACNTRL;
2653}
2654
Ville Syrjälä2bb46292013-02-22 16:12:51 +02002655static inline void __user *to_user_ptr(u64 address)
2656{
2657 return (void __user *)(uintptr_t)address;
2658}
2659
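/*
 * Timeout conversion helpers: the "+ 1" rounds up by one jiffy so the
 * resulting timeout can never expire earlier than the requested interval,
 * and the result is clamped to MAX_JIFFY_OFFSET.
 */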
Imre Deakdf977292013-05-21 20:03:17 +03002660static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
2661{
2662 unsigned long j = msecs_to_jiffies(m);
2663
2664 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2665}
2666
2667static inline unsigned long
2668timespec_to_jiffies_timeout(const struct timespec *value)
2669{
2670 unsigned long j = timespec_to_jiffies(value);
2671
2672 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2673}
2674
Paulo Zanonidce56b32013-12-19 14:29:40 -02002675/*
2676 * If you need to wait X milliseconds between events A and B, but event B
2677 * doesn't happen exactly after event A, record the timestamp (in jiffies) of
2678 * when event A happened.  Then, just before event B, call this function with
2679 * that timestamp as the first argument and X (in milliseconds) as the second.
2680 */
2681static inline void
2682wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
2683{
Imre Deakec5e0cf2014-01-29 13:25:40 +02002684 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
Paulo Zanonidce56b32013-12-19 14:29:40 -02002685
2686 /*
2687 * Don't re-read the value of "jiffies" every time since it may change
2688 * behind our back and break the math.
2689 */
2690 tmp_jiffies = jiffies;
2691 target_jiffies = timestamp_jiffies +
2692 msecs_to_jiffies_timeout(to_wait_ms);
2693
2694 if (time_after(target_jiffies, tmp_jiffies)) {
Imre Deakec5e0cf2014-01-29 13:25:40 +02002695 remaining_jiffies = target_jiffies - tmp_jiffies;
2696 while (remaining_jiffies)
2697 remaining_jiffies =
2698 schedule_timeout_uninterruptible(remaining_jiffies);
Paulo Zanonidce56b32013-12-19 14:29:40 -02002699 }
2700}
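/*
 * Illustrative sketch only (the field name is made up for the example): a
 * caller that must leave at least 300ms between powering a panel off and on
 * again could record the timestamp at power-off and wait just before the
 * next power-on:
 *
 *	intel_dp->last_power_cycle = jiffies;		(at event A)
 *	...
 *	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle, 300);
 */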
2701
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702#endif