/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20160902"

#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__);

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915
 * abrt spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915.verbose_state_checks, format))		\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

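/*
 * Illustrative only (hypothetical caller, not part of this header): a
 * modeset state checker might do
 *
 *	I915_STATE_WARN(!crtc->active, "CRTC for pipe %c is inactive\n",
 *			pipe_name(crtc->pipe));
 *
 * which shouts via WARN() or DRM_ERROR() depending on the moduleparam.
 */
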
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

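/*
 * Sketch of the intended use (the real call sites live in the load path,
 * e.g. i915_driver_load()): place the macro at failure-prone steps so CI
 * can inject errors and exercise the unwind code:
 *
 *	if (i915_inject_load_failure())
 *		return -ENODEV;
 */
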
static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}

/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC. Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

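/*
 * Worked example for sprite_name(), assuming a platform with two sprites
 * per pipe (e.g. VLV, num_sprites[] == {2, 2}): sprite_name(PIPE_B, 1) is
 * 1 * 2 + 1 + 'A' == 'D'. Note the macro expects a drm_device *dev to be
 * in scope for INTEL_INFO().
 */
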
enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

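/*
 * Examples of the mapping above: POWER_DOMAIN_TRANSCODER(TRANSCODER_B)
 * yields POWER_DOMAIN_TRANSCODER_B by plain arithmetic, and the DSI
 * transcoders line up the same way given the enum ordering; only
 * TRANSCODER_EDP is special-cased explicitly.
 */
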
enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP handler could block the workqueue while trying to grab
	 * the mode config mutex, which userspace may already hold; but
	 * userspace is in turn waiting on the DP workqueue to run, and
	 * that is stuck behind the non-DP work. Hence DP gets its own
	 * workqueue.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

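/*
 * Usage sketch for the iterators above (hypothetical debug loop):
 *
 *	enum pipe pipe;
 *	int sprite;
 *
 *	for_each_pipe(dev_priv, pipe)
 *		for_each_sprite(dev_priv, pipe, sprite)
 *			DRM_DEBUG_KMS("pipe %c sprite %c\n",
 *				      pipe_name(pipe),
 *				      sprite_name(pipe, sprite));
 */
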
#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
		for_each_if ((__ports_mask) & (1 << (__port)))

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((plane_mask) & \
			     (1 << drm_plane_index(&intel_plane->base)))

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head)

#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &(dev)->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		for_each_if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		for_each_if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		for_each_if ((1 << (domain)) & (mask))

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	unsigned int bsd_engine;
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *rvda;
	const void *vbt;
	u32 vbt_size;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_fence_reg {
	struct list_head link;
	struct drm_i915_private *i915;
	struct i915_vma *vma;
	int pin_count;
	int id;
	/**
	 * Whether the tiling parameters for the currently
	 * associated fence register have changed. Note that
	 * for the purposes of tracking tiling changes we also
	 * treat the unfenced register, the register slot that
	 * the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	bool dirty;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct drm_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   const struct drm_display_mode *adjusted_mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op);

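/*
 * Illustrative use (hypothetical caller): look up which forcewake domains
 * a register access needs, grab them once, then batch raw accesses:
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	... I915_READ_FW()/I915_WRITE_FW() sequence ...
 *	intel_uncore_forcewake_put(dev_priv, fw);
 */
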
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint32_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned wake_count;
		struct hrtimer timer;
		i915_reg_t reg_set;
		u32 val_set;
		u32 val_clear;
		i915_reg_t reg_ack;
		i915_reg_t reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];

	int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
	     (domain__)++) \
		for_each_if ((mask__) & (domain__)->mask)

#define for_each_fw_domain(domain__, dev_priv__) \
	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)

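/*
 * Illustrative iteration (hypothetical): reset the wake count of every
 * initialised domain:
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *
 *	for_each_fw_domain(domain, dev_priv)
 *		domain->wake_count = 0;
 */
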
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

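/*
 * Worked example: CSR_VERSION(1, 23) == 0x00010017, and in the other
 * direction CSR_VERSION_MAJOR(0x00010017) == 1 and
 * CSR_VERSION_MINOR(0x00010017) == 23.
 */
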
struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_cherryview) sep \
	func(is_haswell) sep \
	func(is_broadwell) sep \
	func(is_skylake) sep \
	func(is_broxton) sep \
	func(is_kabylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_psr) sep \
	func(has_runtime_pm) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_snoop) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg) sep \
	func(has_pooled_eu)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

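/*
 * DEV_INFO_FOR_EACH_FLAG() is an X-macro: passing DEFINE_FLAG and
 * SEP_SEMICOLON (as struct intel_device_info does below) expands every
 * flag into a one-bit bitfield, i.e. roughly:
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_pooled_eu:1;
 *
 * Other callers can supply a different func/sep pair to stamp out, say,
 * a debug printer over the same flag list.
 */
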
struct sseu_dev_info {
	u8 slice_mask;
	u8 subslice_mask;
	u8 eu_total;
	u8 eu_per_subslice;
	u8 min_eu_in_pool;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}

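/*
 * Worked example: with slice_mask == 0x1 (one slice) and subslice_mask ==
 * 0x7 (three subslices per slice), sseu_subslice_total() returns
 * hweight8(0x1) * hweight8(0x7) == 1 * 3 == 3. The multiply assumes the
 * subslice mask is uniform across slices.
 */
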
struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u16 gen_mask;
	u8 ring_mask; /* Rings supported by the HW */
	u8 num_rings;
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	struct sseu_dev_info sseu;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	bool simulated;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool waiting;
		int num_waiters;
		int hangcheck_score;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;
		u32 semaphore_seqno[I915_NUM_ENGINES - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];

		struct drm_i915_error_object {
			int page_count;
			u64 gtt_offset;
			u64 gtt_size;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object *wa_ctx;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests;

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

/**
 * struct i915_gem_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *                initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct i915_gem_context {
	struct kref ref;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *file_priv;
	struct i915_hw_ppgtt *ppgtt;
	struct pid *pid;

	struct i915_ctx_hang_stats hang_stats;

	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	BIT(1)

	/* Unique identifier for this context, used by the hw for tracking */
	unsigned int hw_id;
	u32 user_handle;

	u32 ggtt_alignment;

	struct intel_context {
		struct i915_vma *state;
		struct intel_ring *ring;
		uint32_t *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];
	u32 ring_size;
	u32 desc_template;
	struct atomic_notifier_head status_notifier;
	bool execlists_force_single_submission;

	struct list_head link;

	u8 remap_slice;
	bool closed:1;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	struct intel_fbc_state_cache {
		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
		} plane;

		struct {
			u64 ilk_ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
			unsigned int tiling_mode;
		} fb;
	} state_cache;

	struct intel_fbc_reg_params {
		struct {
			enum pipe pipe;
			enum plane plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			u64 ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
		} fb;

		int cfb_size;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kabypoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	u32 pm_intr_keep;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold; /* Current %busy required to uplock */
	u8 down_threshold; /* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	spinlock_t client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work autoenable_work;
	unsigned boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
1227extern spinlock_t mchdev_lock;
1228
Daniel Vetterc85aa882012-11-02 19:55:03 +01001229struct intel_ilk_power_mgmt {
1230 u8 cur_delay;
1231 u8 min_delay;
1232 u8 max_delay;
1233 u8 fmax;
1234 u8 fstart;
1235
1236 u64 last_count1;
1237 unsigned long last_time1;
1238 unsigned long chipset_power;
1239 u64 last_count2;
Thomas Gleixner5ed0bdf2014-07-16 21:05:06 +00001240 u64 last_time2;
Daniel Vetterc85aa882012-11-02 19:55:03 +01001241 unsigned long gfx_power;
1242 u8 corr;
1243
1244 int c_m;
1245 int r_t;
1246};
1247
Imre Deakc6cb5822014-03-04 19:22:55 +02001248struct drm_i915_private;
1249struct i915_power_well;
1250
1251struct i915_power_well_ops {
1252 /*
1253 * Synchronize the well's hw state to match the current sw state, for
1254 * example enable/disable it based on the current refcount. Called
1255 * during driver init and resume time, possibly after first calling
1256 * the enable/disable handlers.
1257 */
1258 void (*sync_hw)(struct drm_i915_private *dev_priv,
1259 struct i915_power_well *power_well);
1260 /*
1261 * Enable the well and resources that depend on it (for example
1262 * interrupts located on the well). Called after the 0->1 refcount
1263 * transition.
1264 */
1265 void (*enable)(struct drm_i915_private *dev_priv,
1266 struct i915_power_well *power_well);
1267 /*
1268 * Disable the well and resources that depend on it. Called after
1269 * the 1->0 refcount transition.
1270 */
1271 void (*disable)(struct drm_i915_private *dev_priv,
1272 struct i915_power_well *power_well);
1273 /* Returns the hw enabled state. */
1274 bool (*is_enabled)(struct drm_i915_private *dev_priv,
1275 struct i915_power_well *power_well);
1276};
1277
Wang Xingchaoa38911a2013-05-30 22:07:11 +08001278/* Power well structure for haswell */
1279struct i915_power_well {
Imre Deakc1ca7272013-11-25 17:15:29 +02001280 const char *name;
Imre Deak6f3ef5d2013-11-25 17:15:30 +02001281 bool always_on;
Wang Xingchaoa38911a2013-05-30 22:07:11 +08001282 /* power well enable/disable usage count */
1283 int count;
Imre Deakbfafe932014-06-05 20:31:47 +03001284 /* cached hw enabled state */
1285 bool hw_enabled;
Imre Deakc1ca7272013-11-25 17:15:29 +02001286 unsigned long domains;
Imre Deak77961eb2014-03-05 16:20:56 +02001287 unsigned long data;
Imre Deakc6cb5822014-03-04 19:22:55 +02001288 const struct i915_power_well_ops *ops;
Wang Xingchaoa38911a2013-05-30 22:07:11 +08001289};
1290
Imre Deak83c00f52013-10-25 17:36:47 +03001291struct i915_power_domains {
Imre Deakbaa70702013-10-25 17:36:48 +03001292 /*
1293 * Power wells needed for initialization at driver init and suspend
1294 * time are on. They are kept on until after the first modeset.
1295 */
1296 bool init_power_on;
Imre Deak0d116a22014-04-25 13:19:05 +03001297 bool initializing;
Imre Deakc1ca7272013-11-25 17:15:29 +02001298 int power_well_count;
Imre Deakbaa70702013-10-25 17:36:48 +03001299
Imre Deak83c00f52013-10-25 17:36:47 +03001300 struct mutex lock;
Imre Deak1da51582013-11-25 17:15:35 +02001301 int domain_use_count[POWER_DOMAIN_NUM];
Imre Deakc1ca7272013-11-25 17:15:29 +02001302 struct i915_power_well *power_wells;
Imre Deak83c00f52013-10-25 17:36:47 +03001303};
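
/*
 * Sketch of the refcount contract implied by i915_power_well_ops above
 * (an assumption-laden simplification, not the driver's implementation,
 * which lives in intel_runtime_pm.c and also tracks per-domain use counts
 * under power_domains->lock).
 */
static inline void __power_well_get_sketch(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count++ == 0) {
		/* 0->1 transition: turn the well on before anyone relies on it. */
		power_well->ops->enable(dev_priv, power_well);
		power_well->hw_enabled = true;
	}
}

static inline void __power_well_put_sketch(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON(power_well->count == 0);
	if (--power_well->count == 0) {
		/* 1->0 transition: last user gone, power the well down. */
		power_well->ops->disable(dev_priv, power_well);
		power_well->hw_enabled = false;
	}
}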
1304
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001305#define MAX_L3_SLICES 2
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001306struct intel_l3_parity {
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001307 u32 *remap_info[MAX_L3_SLICES];
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001308 struct work_struct error_work;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001309 int which_slice;
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001310};
1311
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001312struct i915_gem_mm {
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001313 /** Memory allocator for GTT stolen memory */
1314 struct drm_mm stolen;
Paulo Zanoni92e97d22015-07-02 19:25:09 -03001315 /** Protects the usage of the GTT stolen memory allocator. This is
1316 * always the inner lock when overlapping with struct_mutex. */
1317 struct mutex stolen_lock;
1318
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001319 /** List of all objects in gtt_space. Used to restore gtt
1320 * mappings on resume */
1321 struct list_head bound_list;
1322 /**
1323 * List of objects which are not bound to the GTT (thus
1324 * are idle and not used by the GPU) but still have
1325 * (presumably uncached) pages still attached.
1326 */
1327 struct list_head unbound_list;
1328
1329 /** Usable portion of the GTT for GEM */
1330 unsigned long stolen_base; /* limited to low memory (32-bit) */
1331
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001332 /** PPGTT used for aliasing the PPGTT with the GTT */
1333 struct i915_hw_ppgtt *aliasing_ppgtt;
1334
Chris Wilson2cfcd322014-05-20 08:28:43 +01001335 struct notifier_block oom_notifier;
Chris Wilsone87666b2016-04-04 14:46:43 +01001336 struct notifier_block vmap_notifier;
Chris Wilsonceabbba52014-03-25 13:23:04 +00001337 struct shrinker shrinker;
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001338
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001339 /** LRU list of objects with fence regs on them. */
1340 struct list_head fence_list;
1341
1342 /**
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001343 * Are we in a non-interruptible section of code like
1344 * modesetting?
1345 */
1346 bool interruptible;
1347
Daniel Vetterbdf1e7e2014-05-21 17:37:52 +02001348	/* the indicator for dispatching video commands on the two BSD rings */
Joonas Lahtinen6f633402016-09-01 14:58:21 +03001349 atomic_t bsd_engine_dispatch_index;
Daniel Vetterbdf1e7e2014-05-21 17:37:52 +02001350
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001351 /** Bit 6 swizzling required for X tiling */
1352 uint32_t bit_6_swizzle_x;
1353 /** Bit 6 swizzling required for Y tiling */
1354 uint32_t bit_6_swizzle_y;
1355
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001356 /* accounting, useful for userland debugging */
Daniel Vetterc20e8352013-07-24 22:40:23 +02001357 spinlock_t object_stat_lock;
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001358 size_t object_memory;
1359 u32 object_count;
1360};
1361
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001362struct drm_i915_error_state_buf {
Chris Wilson0a4cd7c2014-08-22 14:41:39 +01001363 struct drm_i915_private *i915;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001364 unsigned bytes;
1365 unsigned size;
1366 int err;
1367 u8 *buf;
1368 loff_t start;
1369 loff_t pos;
1370};
1371
Mika Kuoppalafc16b482013-06-06 15:18:39 +03001372struct i915_error_state_file_priv {
1373 struct drm_device *dev;
1374 struct drm_i915_error_state *error;
1375};
1376
Daniel Vetter99584db2012-11-14 17:14:04 +01001377struct i915_gpu_error {
1378 /* For hangcheck timer */
1379#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1380#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03001381 /* Hang gpu twice in this window and your context gets banned */
1382#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1383
Chris Wilson737b1502015-01-26 18:03:03 +02001384 struct delayed_work hangcheck_work;
Daniel Vetter99584db2012-11-14 17:14:04 +01001385
1386 /* For reset and error_state handling. */
1387 spinlock_t lock;
1388 /* Protected by the above dev->gpu_error.lock. */
1389 struct drm_i915_error_state *first_error;
Chris Wilson094f9a52013-09-25 17:34:55 +01001390
1391 unsigned long missed_irq_rings;
1392
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001393 /**
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02001394 * State variable controlling the reset flow and count
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001395 *
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02001396 * This is a counter which gets incremented when reset is triggered,
 1397	 * and again when the reset has been handled. So odd values (lowest bit
 1398	 * set) mean that a reset is in progress, and even values mean that the
 1399	 * (reset_counter >> 1):th reset completed successfully.
1400 *
 1401	 * If the reset is not completed successfully, the I915_WEDGED bit is
1402 * set meaning that hardware is terminally sour and there is no
1403 * recovery. All waiters on the reset_queue will be woken when
1404 * that happens.
1405 *
 1406	 * This counter is used by the wait_seqno code to notice that a reset
 1407	 * event happened and that it needs to restart the entire ioctl (since most
1408 * likely the seqno it waited for won't ever signal anytime soon).
Daniel Vetterf69061b2012-12-06 09:01:42 +01001409 *
1410 * This is important for lock-free wait paths, where no contended lock
1411 * naturally enforces the correct ordering between the bail-out of the
1412 * waiter and the gpu reset work code.
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001413 */
1414 atomic_t reset_counter;
1415
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001416#define I915_RESET_IN_PROGRESS_FLAG 1
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02001417#define I915_WEDGED (1 << 31)
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001418
1419 /**
Chris Wilson1f15b762016-07-01 17:23:14 +01001420	 * Waitqueue to signal when a hang is detected. Used for waiters
 1421	 * to release the struct_mutex for the reset to proceed.
1422 */
1423 wait_queue_head_t wait_queue;
1424
1425 /**
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001426 * Waitqueue to signal when the reset has completed. Used by clients
1427 * that wait for dev_priv->mm.wedged to settle.
1428 */
1429 wait_queue_head_t reset_queue;
Daniel Vetter33196de2012-11-14 17:14:05 +01001430
Chris Wilson094f9a52013-09-25 17:34:55 +01001431 /* For missed irq/seqno simulation. */
Chris Wilson688e6c72016-07-01 17:23:15 +01001432 unsigned long test_irq_rings;
Daniel Vetter99584db2012-11-14 17:14:04 +01001433};
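
/*
 * Helpers spelling out the reset_counter encoding documented above (a sketch
 * with made-up names; the driver has equivalent predicates of its own): the
 * low bit flags a reset in progress, bit 31 flags a terminally wedged GPU,
 * and the remaining bits count completed resets.
 */
static inline bool __reset_in_progress_sketch(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_RESET_IN_PROGRESS_FLAG;
}

static inline bool __terminally_wedged_sketch(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}

static inline u32 __reset_count_sketch(struct i915_gpu_error *error)
{
	u32 v = atomic_read(&error->reset_counter);

	/* Drop the WEDGED bit (bit 31) before halving the counter. */
	return (v & ~I915_WEDGED) >> 1;
}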
1434
Zhang Ruib8efb172013-02-05 15:41:53 +08001435enum modeset_restore {
1436 MODESET_ON_LID_OPEN,
1437 MODESET_DONE,
1438 MODESET_SUSPENDED,
1439};
1440
Rodrigo Vivi500ea702015-08-07 17:01:16 -07001441#define DP_AUX_A 0x40
1442#define DP_AUX_B 0x10
1443#define DP_AUX_C 0x20
1444#define DP_AUX_D 0x30
1445
Xiong Zhang11c1b652015-08-17 16:04:04 +08001446#define DDC_PIN_B 0x05
1447#define DDC_PIN_C 0x04
1448#define DDC_PIN_D 0x06
1449
Paulo Zanoni6acab152013-09-12 17:06:24 -03001450struct ddi_vbt_port_info {
Damien Lespiauce4dd492014-08-01 11:07:54 +01001451 /*
1452 * This is an index in the HDMI/DVI DDI buffer translation table.
1453 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1454 * populate this field.
1455 */
1456#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
Paulo Zanoni6acab152013-09-12 17:06:24 -03001457 uint8_t hdmi_level_shift;
Paulo Zanoni311a2092013-09-12 17:12:18 -03001458
1459 uint8_t supports_dvi:1;
1460 uint8_t supports_hdmi:1;
1461 uint8_t supports_dp:1;
Rodrigo Vivi500ea702015-08-07 17:01:16 -07001462
1463 uint8_t alternate_aux_channel;
Xiong Zhang11c1b652015-08-17 16:04:04 +08001464 uint8_t alternate_ddc_pin;
Antti Koskipaa75067dd2015-07-10 14:10:55 +03001465
1466 uint8_t dp_boost_level;
1467 uint8_t hdmi_boost_level;
Paulo Zanoni6acab152013-09-12 17:06:24 -03001468};
1469
Rodrigo Vivibfd7ebd2014-11-14 08:52:30 -08001470enum psr_lines_to_wait {
1471 PSR_0_LINES_TO_WAIT = 0,
1472 PSR_1_LINE_TO_WAIT,
1473 PSR_4_LINES_TO_WAIT,
1474 PSR_8_LINES_TO_WAIT
Pradeep Bhat83a72802014-03-28 10:14:57 +05301475};
1476
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001477struct intel_vbt_data {
1478 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1479 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1480
1481 /* Feature bits */
1482 unsigned int int_tv_support:1;
1483 unsigned int lvds_dither:1;
1484 unsigned int lvds_vbt:1;
1485 unsigned int int_crt_support:1;
1486 unsigned int lvds_use_ssc:1;
1487 unsigned int display_clock_mode:1;
1488 unsigned int fdi_rx_polarity_inverted:1;
Ville Syrjälä3e845c72016-04-08 16:28:12 +03001489 unsigned int panel_type:4;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001490 int lvds_ssc_freq;
1491 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1492
Pradeep Bhat83a72802014-03-28 10:14:57 +05301493 enum drrs_support_type drrs_type;
1494
Jani Nikula6aa23e62016-03-24 17:50:20 +02001495 struct {
1496 int rate;
1497 int lanes;
1498 int preemphasis;
1499 int vswing;
Jani Nikula06411f02016-03-24 17:50:21 +02001500 bool low_vswing;
Jani Nikula6aa23e62016-03-24 17:50:20 +02001501 bool initialized;
1502 bool support;
1503 int bpp;
1504 struct edp_power_seq pps;
1505 } edp;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001506
Jani Nikulaf00076d2013-12-14 20:38:29 -02001507 struct {
Rodrigo Vivibfd7ebd2014-11-14 08:52:30 -08001508 bool full_link;
1509 bool require_aux_wakeup;
1510 int idle_frames;
1511 enum psr_lines_to_wait lines_to_wait;
1512 int tp1_wakeup_time;
1513 int tp2_tp3_wakeup_time;
1514 } psr;
1515
1516 struct {
Jani Nikulaf00076d2013-12-14 20:38:29 -02001517 u16 pwm_freq_hz;
Jani Nikula39fbc9c2014-04-09 11:22:06 +03001518 bool present;
Jani Nikulaf00076d2013-12-14 20:38:29 -02001519 bool active_low_pwm;
Jani Nikula1de60682014-06-24 18:27:39 +03001520 u8 min_brightness; /* min_brightness/255 of max */
Deepak M9a41e172016-04-26 16:14:24 +03001521 enum intel_backlight_type type;
Jani Nikulaf00076d2013-12-14 20:38:29 -02001522 } backlight;
1523
Shobhit Kumard17c5442013-08-27 15:12:25 +03001524 /* MIPI DSI */
1525 struct {
1526 u16 panel_id;
Shobhit Kumard3b542f2014-04-14 11:00:34 +05301527 struct mipi_config *config;
1528 struct mipi_pps_data *pps;
1529 u8 seq_version;
1530 u32 size;
1531 u8 *data;
Jani Nikula8d3ed2f2015-12-21 15:10:57 +02001532 const u8 *sequence[MIPI_SEQ_MAX];
Shobhit Kumard17c5442013-08-27 15:12:25 +03001533 } dsi;
1534
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001535 int crt_ddc_pin;
1536
1537 int child_dev_num;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03001538 union child_device_config *child_dev;
Paulo Zanoni6acab152013-09-12 17:06:24 -03001539
1540 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
Jani Nikula9d6c8752016-03-24 17:50:22 +02001541 struct sdvo_device_mapping sdvo_mappings[2];
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001542};
1543
Ville Syrjälä77c122b2013-08-06 22:24:04 +03001544enum intel_ddb_partitioning {
1545 INTEL_DDB_PART_1_2,
1546 INTEL_DDB_PART_5_6, /* IVB+ */
1547};
1548
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03001549struct intel_wm_level {
1550 bool enable;
1551 uint32_t pri_val;
1552 uint32_t spr_val;
1553 uint32_t cur_val;
1554 uint32_t fbc_val;
1555};
1556
Imre Deak820c1982013-12-17 14:46:36 +02001557struct ilk_wm_values {
Ville Syrjälä609cede2013-10-09 19:18:03 +03001558 uint32_t wm_pipe[3];
1559 uint32_t wm_lp[3];
1560 uint32_t wm_lp_spr[3];
1561 uint32_t wm_linetime[3];
1562 bool enable_fbc_wm;
1563 enum intel_ddb_partitioning partitioning;
1564};
1565
Ville Syrjälä262cd2e2015-06-24 22:00:04 +03001566struct vlv_pipe_wm {
1567 uint16_t primary;
1568 uint16_t sprite[2];
1569 uint8_t cursor;
1570};
1571
1572struct vlv_sr_wm {
1573 uint16_t plane;
1574 uint8_t cursor;
1575};
1576
Ville Syrjälä0018fda2015-03-05 21:19:45 +02001577struct vlv_wm_values {
Ville Syrjälä262cd2e2015-06-24 22:00:04 +03001578 struct vlv_pipe_wm pipe[3];
1579 struct vlv_sr_wm sr;
Ville Syrjäläae801522015-03-05 21:19:49 +02001580 struct {
Ville Syrjälä0018fda2015-03-05 21:19:45 +02001581 uint8_t cursor;
1582 uint8_t sprite[2];
1583 uint8_t primary;
1584 } ddl[3];
Ville Syrjälä6eb1a682015-06-24 22:00:03 +03001585 uint8_t level;
1586 bool cxsr;
Ville Syrjälä0018fda2015-03-05 21:19:45 +02001587};
1588
Damien Lespiauc1939242014-11-04 17:06:41 +00001589struct skl_ddb_entry {
Damien Lespiau16160e32014-11-04 17:06:53 +00001590 uint16_t start, end; /* in number of blocks, 'end' is exclusive */
Damien Lespiauc1939242014-11-04 17:06:41 +00001591};
1592
1593static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1594{
Damien Lespiau16160e32014-11-04 17:06:53 +00001595 return entry->end - entry->start;
Damien Lespiauc1939242014-11-04 17:06:41 +00001596}
1597
Damien Lespiau08db6652014-11-04 17:06:52 +00001598static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1599 const struct skl_ddb_entry *e2)
1600{
 1601	return e1->start == e2->start && e1->end == e2->end;
1605}
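
/*
 * Companion sketch (not in the driver): DDB entries are half-open intervals
 * [start, end), so two allocations collide exactly when the intervals
 * intersect.
 */
static inline bool __skl_ddb_entries_overlap_sketch(const struct skl_ddb_entry *e1,
						    const struct skl_ddb_entry *e2)
{
	return e1->start < e2->end && e2->start < e1->end;
}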
1606
Damien Lespiauc1939242014-11-04 17:06:41 +00001607struct skl_ddb_allocation {
Damien Lespiau34bb56a2014-11-04 17:07:01 +00001608 struct skl_ddb_entry pipe[I915_MAX_PIPES];
Chandra Konduru2cd601c2015-04-27 15:47:37 -07001609 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
Matt Roper4969d332015-09-24 15:53:10 -07001610 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
Damien Lespiauc1939242014-11-04 17:06:41 +00001611};
1612
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001613struct skl_wm_values {
Matt Roper2b4b9f32016-05-12 07:06:07 -07001614 unsigned dirty_pipes;
Damien Lespiauc1939242014-11-04 17:06:41 +00001615 struct skl_ddb_allocation ddb;
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001616 uint32_t wm_linetime[I915_MAX_PIPES];
1617 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001618 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001619};
1620
1621struct skl_wm_level {
1622 bool plane_en[I915_MAX_PLANES];
1623 uint16_t plane_res_b[I915_MAX_PLANES];
1624 uint8_t plane_res_l[I915_MAX_PLANES];
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001625};
1626
Paulo Zanonic67a4702013-08-19 13:18:09 -03001627/*
Paulo Zanoni765dab672014-03-07 20:08:18 -03001628 * This struct helps track the state needed for runtime PM, which puts the
1629 * device in PCI D3 state. Notice that when this happens, nothing on the
 1630 * graphics device works, even register access, so we don't get interrupts or
1631 * anything else.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001632 *
Paulo Zanoni765dab672014-03-07 20:08:18 -03001633 * Every piece of our code that needs to actually touch the hardware needs to
1634 * either call intel_runtime_pm_get or call intel_display_power_get with the
1635 * appropriate power domain.
Paulo Zanonia8a8bd52014-03-07 20:08:05 -03001636 *
Paulo Zanoni765dab672014-03-07 20:08:18 -03001637 * Our driver uses the autosuspend delay feature, which means we'll only really
 1638 * suspend if we stay at zero refcount for a certain amount of time. The
Daniel Vetterf458ebb2014-09-30 10:56:39 +02001639 * default value is currently very conservative (see intel_runtime_pm_enable), but
Paulo Zanoni765dab672014-03-07 20:08:18 -03001640 * it can be changed with the standard runtime PM files from sysfs.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001641 *
 1642 * The irqs_enabled variable becomes false exactly after we disable the IRQs and
 1643 * goes back to true exactly before we reenable them. We use this variable
1644 * to check if someone is trying to enable/disable IRQs while they're supposed
1645 * to be disabled. This shouldn't happen and we'll print some error messages in
Paulo Zanoni730488b2014-03-07 20:12:32 -03001646 * case it happens.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001647 *
Paulo Zanoni765dab672014-03-07 20:08:18 -03001648 * For more, see Documentation/power/runtime_pm.txt.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001649 */
Paulo Zanoni5d584b22014-03-07 20:08:15 -03001650struct i915_runtime_pm {
Imre Deak1f814da2015-12-16 02:52:19 +02001651 atomic_t wakeref_count;
Imre Deak2b19efe2015-12-15 20:10:37 +02001652 atomic_t atomic_seq;
Paulo Zanoni5d584b22014-03-07 20:08:15 -03001653 bool suspended;
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02001654 bool irqs_enabled;
Paulo Zanonic67a4702013-08-19 13:18:09 -03001655};
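
/*
 * Usage sketch for the rule above (illustrative only; the get/put entry
 * points are declared elsewhere in the driver):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... read/write registers, talk to the hardware ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * Forgetting the put leaks a wakeref and keeps the device out of D3;
 * wakeref_count above exists so such imbalances can be caught.
 */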
1656
Daniel Vetter926321d2013-10-16 13:30:34 +02001657enum intel_pipe_crc_source {
1658 INTEL_PIPE_CRC_SOURCE_NONE,
1659 INTEL_PIPE_CRC_SOURCE_PLANE1,
1660 INTEL_PIPE_CRC_SOURCE_PLANE2,
1661 INTEL_PIPE_CRC_SOURCE_PF,
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001662 INTEL_PIPE_CRC_SOURCE_PIPE,
Daniel Vetter3d099a02013-10-16 22:55:58 +02001663 /* TV/DP on pre-gen5/vlv can't use the pipe source. */
1664 INTEL_PIPE_CRC_SOURCE_TV,
1665 INTEL_PIPE_CRC_SOURCE_DP_B,
1666 INTEL_PIPE_CRC_SOURCE_DP_C,
1667 INTEL_PIPE_CRC_SOURCE_DP_D,
Daniel Vetter46a19182013-11-01 10:50:20 +01001668 INTEL_PIPE_CRC_SOURCE_AUTO,
Daniel Vetter926321d2013-10-16 13:30:34 +02001669 INTEL_PIPE_CRC_SOURCE_MAX,
1670};
1671
Shuang He8bf1e9f2013-10-15 18:55:27 +01001672struct intel_pipe_crc_entry {
Damien Lespiauac2300d2013-10-15 18:55:30 +01001673 uint32_t frame;
Shuang He8bf1e9f2013-10-15 18:55:27 +01001674 uint32_t crc[5];
1675};
1676
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001677#define INTEL_PIPE_CRC_ENTRIES_NR 128
Shuang He8bf1e9f2013-10-15 18:55:27 +01001678struct intel_pipe_crc {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001679 spinlock_t lock;
1680 bool opened; /* exclusive access to the result file */
Damien Lespiaue5f75ac2013-10-15 18:55:34 +01001681 struct intel_pipe_crc_entry *entries;
Daniel Vetter926321d2013-10-16 13:30:34 +02001682 enum intel_pipe_crc_source source;
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001683 int head, tail;
Damien Lespiau07144422013-10-15 18:55:40 +01001684 wait_queue_head_t wq;
Shuang He8bf1e9f2013-10-15 18:55:27 +01001685};
1686
Daniel Vetterf99d7062014-06-19 16:01:59 +02001687struct i915_frontbuffer_tracking {
Chris Wilsonb5add952016-08-04 16:32:36 +01001688 spinlock_t lock;
Daniel Vetterf99d7062014-06-19 16:01:59 +02001689
1690 /*
 1691	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1692 * scheduled flips.
1693 */
1694 unsigned busy_bits;
1695 unsigned flip_bits;
1696};
1697
Mika Kuoppala72253422014-10-07 17:21:26 +03001698struct i915_wa_reg {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001699 i915_reg_t addr;
Mika Kuoppala72253422014-10-07 17:21:26 +03001700 u32 value;
1701 /* bitmask representing WA bits */
1702 u32 mask;
1703};
1704
Arun Siluvery33136b02016-01-21 21:43:47 +00001705/*
1706 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
1707 * allowing it for RCS as we don't foresee any requirement of having
1708 * a whitelist for other engines. When it is really required for
 1709 * other engines, the limit will need to be increased.
1710 */
1711#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
Mika Kuoppala72253422014-10-07 17:21:26 +03001712
1713struct i915_workarounds {
1714 struct i915_wa_reg reg[I915_MAX_WA_REGS];
1715 u32 count;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001716 u32 hw_whitelist_count[I915_NUM_ENGINES];
Mika Kuoppala72253422014-10-07 17:21:26 +03001717};
1718
Yu Zhangcf9d2892015-02-10 19:05:47 +08001719struct i915_virtual_gpu {
1720 bool active;
1721};
1722
Matt Roperaa363132015-09-24 15:53:18 -07001723/* used in computing the new watermarks state */
1724struct intel_wm_config {
1725 unsigned int num_pipes_active;
1726 bool sprites_enabled;
1727 bool sprites_scaled;
1728};
1729
Jani Nikula77fec552014-03-31 14:27:22 +03001730struct drm_i915_private {
Chris Wilson8f460e22016-06-24 14:00:18 +01001731 struct drm_device drm;
1732
Chris Wilsonefab6d82015-04-07 16:20:57 +01001733 struct kmem_cache *objects;
Chris Wilsone20d2ab2015-04-07 16:20:58 +01001734 struct kmem_cache *vmas;
Chris Wilsonefab6d82015-04-07 16:20:57 +01001735 struct kmem_cache *requests;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001736
Damien Lespiau5c969aa2014-02-07 19:12:48 +00001737 const struct intel_device_info info;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001738
1739 int relative_constants_mode;
1740
1741 void __iomem *regs;
1742
Chris Wilson907b28c2013-07-19 20:36:52 +01001743 struct intel_uncore uncore;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001744
Yu Zhangcf9d2892015-02-10 19:05:47 +08001745 struct i915_virtual_gpu vgpu;
1746
Zhi Wang0ad35fe2016-06-16 08:07:00 -04001747 struct intel_gvt gvt;
1748
Alex Dai33a732f2015-08-12 15:43:36 +01001749 struct intel_guc guc;
1750
Daniel Vettereb805622015-05-04 14:58:44 +02001751 struct intel_csr csr;
1752
Jani Nikula5ea6e5e2015-04-01 10:55:04 +03001753 struct intel_gmbus gmbus[GMBUS_NUM_PINS];
Daniel Vetter28c70f12012-12-01 13:53:45 +01001754
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001755 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1756 * controller on different i2c buses. */
1757 struct mutex gmbus_mutex;
1758
1759 /**
1760 * Base address of the gmbus and gpio block.
1761 */
1762 uint32_t gpio_mmio_base;
1763
Shashank Sharmab6fdd0f2014-05-19 20:54:03 +05301764 /* MMIO base address for MIPI regs */
1765 uint32_t mipi_mmio_base;
1766
Ville Syrjälä443a3892015-11-11 20:34:15 +02001767 uint32_t psr_mmio_base;
1768
Imre Deak44cb7342016-08-10 14:07:29 +03001769 uint32_t pps_mmio_base;
1770
Daniel Vetter28c70f12012-12-01 13:53:45 +01001771 wait_queue_head_t gmbus_wait_queue;
1772
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001773 struct pci_dev *bridge_dev;
Chris Wilson0ca5fa32016-05-24 14:53:40 +01001774 struct i915_gem_context *kernel_context;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001775 struct intel_engine_cs engine[I915_NUM_ENGINES];
Chris Wilson51d545d2016-08-15 10:49:02 +01001776 struct i915_vma *semaphore;
Chris Wilsonddf07be2016-08-02 22:50:39 +01001777 u32 next_seqno;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001778
Daniel Vetterba8286f2014-09-11 07:43:25 +02001779 struct drm_dma_handle *status_page_dmah;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001780 struct resource mch_res;
1781
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001782 /* protects the irq masks */
1783 spinlock_t irq_lock;
1784
Sourab Gupta84c33a62014-06-02 16:47:17 +05301785 /* protects the mmio flip data */
1786 spinlock_t mmio_flip_lock;
1787
Imre Deakf8b79e52014-03-04 19:23:07 +02001788 bool display_irqs_enabled;
1789
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001790 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1791 struct pm_qos_request pm_qos;
1792
Ville Syrjäläa5805162015-05-26 20:42:30 +03001793 /* Sideband mailbox protection */
1794 struct mutex sb_lock;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001795
1796 /** Cached value of IMR to avoid reads in updating the bitfield */
Ben Widawskyabd58f02013-11-02 21:07:09 -07001797 union {
1798 u32 irq_mask;
1799 u32 de_irq_mask[I915_MAX_PIPES];
1800 };
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001801 u32 gt_irq_mask;
Paulo Zanoni605cd252013-08-06 18:57:15 -03001802 u32 pm_irq_mask;
Deepak Sa6706b42014-03-15 20:23:22 +05301803 u32 pm_rps_events;
Imre Deak91d181d2014-02-10 18:42:49 +02001804 u32 pipestat_irq_mask[I915_MAX_PIPES];
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001805
Jani Nikula5fcece82015-05-27 15:03:42 +03001806 struct i915_hotplug hotplug;
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001807 struct intel_fbc fbc;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301808 struct i915_drrs drrs;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001809 struct intel_opregion opregion;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001810 struct intel_vbt_data vbt;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001811
Jesse Barnesd9ceb812014-10-09 12:57:43 -07001812 bool preserve_bios_swizzle;
1813
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001814 /* overlay */
1815 struct intel_overlay *overlay;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001816
Jani Nikula58c68772013-11-08 16:48:54 +02001817 /* backlight registers and fields in struct intel_panel */
Daniel Vetter07f11d42014-09-15 14:35:09 +02001818 struct mutex backlight_lock;
Jani Nikula31ad8ec2013-04-02 15:48:09 +03001819
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001820 /* LVDS info */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001821 bool no_aux_handshake;
1822
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001823 /* protects panel power sequencer state */
1824 struct mutex pps_mutex;
1825
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001826 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001827 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1828
1829 unsigned int fsb_freq, mem_freq, is_ddr3;
Ville Syrjäläb2045352016-05-13 23:41:27 +03001830 unsigned int skl_preferred_vco_freq;
Maarten Lankhorst1a617b72015-12-03 14:31:06 +01001831 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
Mika Kaholaadafdc62015-08-18 14:36:59 +03001832 unsigned int max_dotclk_freq;
Ville Syrjäläe7dc33f2016-03-02 17:22:13 +02001833 unsigned int rawclk_freq;
Ville Syrjälä6bcda4f2014-10-07 17:41:22 +03001834 unsigned int hpll_freq;
Ville Syrjäläbfa7df02015-09-24 23:29:18 +03001835 unsigned int czclk_freq;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001836
Ville Syrjälä63911d72016-05-13 23:41:32 +03001837 struct {
Ville Syrjälä709e05c2016-05-13 23:41:33 +03001838 unsigned int vco, ref;
Ville Syrjälä63911d72016-05-13 23:41:32 +03001839 } cdclk_pll;
1840
Daniel Vetter645416f2013-09-02 16:22:25 +02001841 /**
1842 * wq - Driver workqueue for GEM.
1843 *
1844 * NOTE: Work items scheduled here are not allowed to grab any modeset
 1845	 * locks, as otherwise the flushing done in the pageflip code will
1846 * result in deadlocks.
1847 */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001848 struct workqueue_struct *wq;
1849
1850 /* Display functions */
1851 struct drm_i915_display_funcs display;
1852
1853 /* PCH chipset type */
1854 enum intel_pch pch_type;
Paulo Zanoni17a303e2012-11-20 15:12:07 -02001855 unsigned short pch_id;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001856
1857 unsigned long quirks;
1858
Zhang Ruib8efb172013-02-05 15:41:53 +08001859 enum modeset_restore modeset_restore;
1860 struct mutex modeset_restore_lock;
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01001861 struct drm_atomic_state *modeset_restore_state;
Maarten Lankhorst73974892016-08-05 23:28:27 +03001862 struct drm_modeset_acquire_ctx reset_ctx;
Eric Anholt673a3942008-07-30 12:06:12 -07001863
Ben Widawskya7bbbd62013-07-16 16:50:07 -07001864 struct list_head vm_list; /* Global list of all address spaces */
Joonas Lahtinen62106b42016-03-18 10:42:57 +02001865 struct i915_ggtt ggtt; /* VM representing the global address space */
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001866
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001867 struct i915_gem_mm mm;
Chris Wilsonad46cb52014-08-07 14:20:40 +01001868 DECLARE_HASHTABLE(mm_structs, 7);
1869 struct mutex mm_lock;
Daniel Vetter87813422012-05-02 11:49:32 +02001870
Chris Wilson5d1808e2016-04-28 09:56:51 +01001871 /* The hw wants to have a stable context identifier for the lifetime
1872 * of the context (for OA, PASID, faults, etc). This is limited
1873 * in execlists to 21 bits.
1874 */
1875 struct ida context_hw_ida;
1876#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1877
Daniel Vetter87813422012-05-02 11:49:32 +02001878 /* Kernel Modesetting */
1879
Damien Lespiau76c4ac02014-02-07 19:12:52 +00001880 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1881 struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05001882 wait_queue_head_t pending_flip_queue;
1883
Daniel Vetterc4597872013-10-21 21:04:07 +02001884#ifdef CONFIG_DEBUG_FS
1885 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1886#endif
1887
Maarten Lankhorst565602d2015-12-10 12:33:57 +01001888 /* dpll and cdclk state is protected by connection_mutex */
Daniel Vettere72f9fb2013-06-05 13:34:06 +02001889 int num_shared_dpll;
1890 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
Ander Conselvan de Oliveiraf9476a62016-03-08 17:46:22 +02001891 const struct intel_dpll_mgr *dpll_mgr;
Maarten Lankhorst565602d2015-12-10 12:33:57 +01001892
Maarten Lankhorstfbf6d872016-03-23 14:51:12 +01001893 /*
1894 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
1895 * Must be global rather than per dpll, because on some platforms
1896 * plls share registers.
1897 */
1898 struct mutex dpll_lock;
1899
Maarten Lankhorst565602d2015-12-10 12:33:57 +01001900 unsigned int active_crtcs;
1901 unsigned int min_pixclk[I915_MAX_PIPES];
1902
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001903 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
Jesse Barnesee7b9f92012-04-20 17:11:53 +01001904
Mika Kuoppala72253422014-10-07 17:21:26 +03001905 struct i915_workarounds workarounds;
Arun Siluvery888b5992014-08-26 14:44:51 +01001906
Daniel Vetterf99d7062014-06-19 16:01:59 +02001907 struct i915_frontbuffer_tracking fb_tracking;
1908
Jesse Barnes652c3932009-08-17 13:31:43 -07001909 u16 orig_clock;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001910
Zhenyu Wangc48044112009-12-17 14:48:43 +08001911 bool mchbar_need_disable;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001912
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001913 struct intel_l3_parity l3_parity;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001914
Ben Widawsky59124502013-07-04 11:02:05 -07001915 /* Cannot be determined by PCIID. You must always read a register. */
Mika Kuoppala3accaf72016-04-13 17:26:43 +03001916 u32 edram_cap;
Ben Widawsky59124502013-07-04 11:02:05 -07001917
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001918 /* gen6+ rps state */
Daniel Vetterc85aa882012-11-02 19:55:03 +01001919 struct intel_gen6_power_mgmt rps;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001920
Daniel Vetter20e4d402012-08-08 23:35:39 +02001921 /* ilk-only ips/rps state. Everything in here is protected by the global
1922 * mchdev_lock in intel_pm.c */
Daniel Vetterc85aa882012-11-02 19:55:03 +01001923 struct intel_ilk_power_mgmt ips;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001924
Imre Deak83c00f52013-10-25 17:36:47 +03001925 struct i915_power_domains power_domains;
Wang Xingchaoa38911a2013-05-30 22:07:11 +08001926
Rodrigo Vivia031d702013-10-03 16:15:06 -03001927 struct i915_psr psr;
Rodrigo Vivi3f51e472013-07-11 18:45:00 -03001928
Daniel Vetter99584db2012-11-14 17:14:04 +01001929 struct i915_gpu_error gpu_error;
Chris Wilsonae681d92010-10-01 14:57:56 +01001930
Jesse Barnesc9cddff2013-05-08 10:45:13 -07001931 struct drm_i915_gem_object *vlv_pctx;
1932
Daniel Vetter06957262015-08-10 13:34:08 +02001933#ifdef CONFIG_DRM_FBDEV_EMULATION
Dave Airlie8be48d92010-03-30 05:34:14 +00001934 /* list of fbdev register on this device */
1935 struct intel_fbdev *fbdev;
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001936 struct work_struct fbdev_suspend_work;
Daniel Vetter4520f532013-10-09 09:18:51 +02001937#endif
Chris Wilsone953fd72011-02-21 22:23:52 +00001938
1939 struct drm_property *broadcast_rgb_property;
Chris Wilson3f43c482011-05-12 22:17:24 +01001940 struct drm_property *force_audio_property;
Ben Widawskye3689192012-05-25 16:56:22 -07001941
Imre Deak58fddc22015-01-08 17:54:14 +02001942 /* hda/i915 audio component */
David Henningsson51e1d832015-08-19 10:48:56 +02001943 struct i915_audio_component *audio_component;
Imre Deak58fddc22015-01-08 17:54:14 +02001944 bool audio_component_registered;
Libin Yang4a21ef72015-09-02 14:11:39 +08001945 /**
1946 * av_mutex - mutex for audio/video sync
1947 *
1948 */
1949 struct mutex av_mutex;
Imre Deak58fddc22015-01-08 17:54:14 +02001950
Ben Widawsky254f9652012-06-04 14:42:42 -07001951 uint32_t hw_context_size;
Ben Widawskya33afea2013-09-17 21:12:45 -07001952 struct list_head context_list;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001953
Damien Lespiau3e683202012-12-11 18:48:29 +00001954 u32 fdi_rx_config;
Paulo Zanoni68d18ad2012-12-01 12:04:26 -02001955
Ville Syrjäläc2317752016-03-15 16:39:56 +02001956 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
Ville Syrjälä70722462015-04-10 18:21:28 +03001957 u32 chv_phy_control;
Ville Syrjäläc2317752016-03-15 16:39:56 +02001958 /*
1959 * Shadows for CHV DPLL_MD regs to keep the state
 1960	 * checker somewhat working in the presence of hardware
1961 * crappiness (can't read out DPLL_MD for pipes B & C).
1962 */
1963 u32 chv_dpll_md[I915_MAX_PIPES];
Imre Deakadc7f042016-04-04 17:27:10 +03001964 u32 bxt_phy_grc;
Ville Syrjälä70722462015-04-10 18:21:28 +03001965
Daniel Vetter842f1c82014-03-10 10:01:44 +01001966 u32 suspend_count;
Imre Deakbc872292015-11-18 17:32:30 +02001967 bool suspended_to_idle;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001968 struct i915_suspend_saved_registers regfile;
Imre Deakddeea5b2014-05-05 15:19:56 +03001969 struct vlv_s0ix_state vlv_s0ix_state;
Daniel Vetter231f42a2012-11-02 19:55:05 +01001970
Lyude656d1b82016-08-17 15:55:54 -04001971 enum {
1972 I915_SKL_SAGV_UNKNOWN = 0,
1973 I915_SKL_SAGV_DISABLED,
1974 I915_SKL_SAGV_ENABLED,
1975 I915_SKL_SAGV_NOT_CONTROLLED
1976 } skl_sagv_status;
1977
Ville Syrjälä53615a52013-08-01 16:18:50 +03001978 struct {
1979 /*
1980 * Raw watermark latency values:
1981 * in 0.1us units for WM0,
1982 * in 0.5us units for WM1+.
1983 */
1984 /* primary */
1985 uint16_t pri_latency[5];
1986 /* sprite */
1987 uint16_t spr_latency[5];
1988 /* cursor */
1989 uint16_t cur_latency[5];
Pradeep Bhat2af30a52014-11-04 17:06:38 +00001990 /*
1991 * Raw watermark memory latency values
1992 * for SKL for all 8 levels
1993 * in 1us units.
1994 */
1995 uint16_t skl_latency[8];
Ville Syrjälä609cede2013-10-09 19:18:03 +03001996
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00001997 /*
1998 * The skl_wm_values structure is a bit too big for stack
1999 * allocation, so we keep the staging struct where we store
2000 * intermediate results here instead.
2001 */
2002 struct skl_wm_values skl_results;
2003
Ville Syrjälä609cede2013-10-09 19:18:03 +03002004 /* current hardware state */
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00002005 union {
2006 struct ilk_wm_values hw;
2007 struct skl_wm_values skl_hw;
Ville Syrjälä0018fda2015-03-05 21:19:45 +02002008 struct vlv_wm_values vlv;
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00002009 };
Ville Syrjälä58590c12015-09-08 21:05:12 +03002010
2011 uint8_t max_level;
Matt Ropered4a6a72016-02-23 17:20:13 -08002012
2013 /*
2014 * Should be held around atomic WM register writing; also
 2015		 * protects intel_crtc->wm.active and
2016 * cstate->wm.need_postvbl_update.
2017 */
2018 struct mutex wm_mutex;
Matt Roper279e99d2016-05-12 07:06:02 -07002019
2020 /*
2021 * Set during HW readout of watermarks/DDB. Some platforms
2022 * need to know when we're still using BIOS-provided values
2023 * (which we don't fully trust).
2024 */
2025 bool distrust_bios_wm;
Ville Syrjälä53615a52013-08-01 16:18:50 +03002026 } wm;
2027
Paulo Zanoni8a187452013-12-06 20:32:13 -02002028 struct i915_runtime_pm pm;
2029
Oscar Mateoa83014d2014-07-24 17:04:21 +01002030 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
2031 struct {
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00002032 void (*cleanup_engine)(struct intel_engine_cs *engine);
Chris Wilson67d97da2016-07-04 08:08:31 +01002033
2034 /**
2035 * Is the GPU currently considered idle, or busy executing
2036 * userspace requests? Whilst idle, we allow runtime power
2037 * management to power down the hardware and display clocks.
2038 * In order to reduce the effect on performance, there
2039 * is a slight delay before we do so.
2040 */
2041 unsigned int active_engines;
2042 bool awake;
2043
2044 /**
2045 * We leave the user IRQ off as much as possible,
2046 * but this means that requests will finish and never
2047 * be retired once the system goes idle. Set a timer to
2048 * fire periodically while the ring is running. When it
2049 * fires, go retire requests.
2050 */
2051 struct delayed_work retire_work;
2052
2053 /**
2054 * When we detect an idle GPU, we want to turn on
2055 * powersaving features. So once we see that there
2056 * are no more requests outstanding and no more
2057 * arrive within a small period of time, we fire
2058 * off the idle_work.
2059 */
2060 struct delayed_work idle_work;
Oscar Mateoa83014d2014-07-24 17:04:21 +01002061 } gt;
2062
Ville Syrjälä3be60de2015-09-08 18:05:45 +03002063 /* perform PHY state sanity checks? */
2064 bool chv_phy_assert[2];
2065
Takashi Iwai0bdf5a02015-11-30 18:19:39 +01002066 struct intel_encoder *dig_port_map[I915_MAX_PORTS];
2067
Daniel Vetterbdf1e7e2014-05-21 17:37:52 +02002068 /*
2069 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
2070 * will be rejected. Instead look for a better place.
2071 */
Jani Nikula77fec552014-03-31 14:27:22 +03002072};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Chris Wilson2c1792a2013-08-01 18:39:55 +01002074static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2075{
Chris Wilson091387c2016-06-24 14:00:21 +01002076 return container_of(dev, struct drm_i915_private, drm);
Chris Wilson2c1792a2013-08-01 18:39:55 +01002077}
2078
David Weinehallc49d13e2016-08-22 13:32:42 +03002079static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
Imre Deak888d0d42015-01-08 17:54:13 +02002080{
David Weinehallc49d13e2016-08-22 13:32:42 +03002081 return to_i915(dev_get_drvdata(kdev));
Imre Deak888d0d42015-01-08 17:54:13 +02002082}
2083
Alex Dai33a732f2015-08-12 15:43:36 +01002084static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2085{
2086 return container_of(guc, struct drm_i915_private, guc);
2087}
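
/*
 * All three helpers above are thin container_of()/drvdata upcasts, so they
 * are free at runtime. A minimal usage sketch (variable names made up):
 *
 *	struct drm_i915_private *dev_priv = to_i915(dev);
 *	struct drm_i915_private *i915 = guc_to_i915(&dev_priv->guc);
 */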
2088
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002089/* Simple iterator over all initialised engines */
2090#define for_each_engine(engine__, dev_priv__) \
2091 for ((engine__) = &(dev_priv__)->engine[0]; \
2092 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
2093 (engine__)++) \
2094 for_each_if (intel_engine_initialized(engine__))
Chris Wilsonb4519512012-05-11 14:29:30 +01002095
Dave Gordonc3232b12016-03-23 18:19:53 +00002096/* Iterator with engine_id */
2097#define for_each_engine_id(engine__, dev_priv__, id__) \
2098 for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
2099 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
2100 (engine__)++) \
2101 for_each_if (((id__) = (engine__)->id, \
2102 intel_engine_initialized(engine__)))
2103
Chris Wilsonbafb0fc2016-08-27 08:54:01 +01002104#define __mask_next_bit(mask) ({ \
2105 int __idx = ffs(mask) - 1; \
2106 mask &= ~BIT(__idx); \
2107 __idx; \
2108})
2109
Dave Gordonc3232b12016-03-23 18:19:53 +00002110/* Iterator over subset of engines selected by mask */
Chris Wilsonbafb0fc2016-08-27 08:54:01 +01002111#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2112 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
2113 tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
Mika Kuoppalaee4b6fa2016-03-16 17:54:00 +02002114
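/*
 * Usage sketch for the iterators above (illustrative only): walk the render
 * and blitter engines, with tmp providing the scratch word that the masked
 * iterator consumes bit by bit via __mask_next_bit().
 *
 *	struct intel_engine_cs *engine;
 *	unsigned int tmp;
 *
 *	for_each_engine_masked(engine, dev_priv,
 *			       RENDER_RING | BLT_RING, tmp)
 *		DRM_DEBUG("%s is initialised\n", engine->name);
 */
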
Wu Fengguangb1d7e4b2012-02-14 11:45:36 +08002115enum hdmi_force_audio {
2116 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
2117 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
2118 HDMI_AUDIO_AUTO, /* trust EDID */
2119 HDMI_AUDIO_ON, /* force turn on HDMI audio */
2120};
2121
Daniel Vetter190d6cd2013-07-04 13:06:28 +02002122#define I915_GTT_OFFSET_NONE ((u32)-1)
Chris Wilsoned2f3452012-11-15 11:32:19 +00002123
Chris Wilson37e680a2012-06-07 15:38:42 +01002124struct drm_i915_gem_object_ops {
Chris Wilsonde472662016-01-22 18:32:31 +00002125 unsigned int flags;
2126#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
2127
Chris Wilson37e680a2012-06-07 15:38:42 +01002128 /* Interface between the GEM object and its backing storage.
2129 * get_pages() is called once prior to the use of the associated set
 2130	 * of pages, before binding them into the GTT, and put_pages() is
2131 * called after we no longer need them. As we expect there to be
2132 * associated cost with migrating pages between the backing storage
2133 * and making them available for the GPU (e.g. clflush), we may hold
2134 * onto the pages after they are no longer referenced by the GPU
2135 * in case they may be used again shortly (for example migrating the
2136 * pages to a different memory domain within the GTT). put_pages()
2137 * will therefore most likely be called when the object itself is
2138 * being released or under memory pressure (where we attempt to
2139 * reap pages for the shrinker).
2140 */
2141 int (*get_pages)(struct drm_i915_gem_object *);
2142 void (*put_pages)(struct drm_i915_gem_object *);
Chris Wilsonde472662016-01-22 18:32:31 +00002143
Chris Wilson5cc9ed42014-05-16 14:22:37 +01002144 int (*dmabuf_export)(struct drm_i915_gem_object *);
2145 void (*release)(struct drm_i915_gem_object *);
Chris Wilson37e680a2012-06-07 15:38:42 +01002146};
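
/*
 * Shape of a backend implementing the contract above (a toy sketch, not one
 * of the driver's real backends such as the shmem, stolen or userptr ops;
 * allocation and error handling are elided).
 */
#if 0 /* illustration only */
static int sketch_get_pages(struct drm_i915_gem_object *obj)
{
	/* Populate obj->pages (an sg_table) and return 0 on success. */
	return -ENODEV;
}

static void sketch_put_pages(struct drm_i915_gem_object *obj)
{
	/* Undo whatever sketch_get_pages() set up. */
}

static const struct drm_i915_gem_object_ops sketch_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = sketch_get_pages,
	.put_pages = sketch_put_pages,
};
#endif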
2147
Daniel Vettera071fa02014-06-18 23:28:09 +02002148/*
2149 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302150 * considered to be the frontbuffer for the given plane interface-wise. This
Daniel Vettera071fa02014-06-18 23:28:09 +02002151 * doesn't mean that the hw necessarily already scans it out, but that any
2152 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2153 *
2154 * We have one bit per pipe and per scanout plane type.
2155 */
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302156#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
2157#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
Daniel Vettera071fa02014-06-18 23:28:09 +02002158#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
2159 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2160#define INTEL_FRONTBUFFER_CURSOR(pipe) \
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302161 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2162#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
2163 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
Daniel Vettera071fa02014-06-18 23:28:09 +02002164#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302165 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
Daniel Vettercc365132014-06-18 13:59:13 +02002166#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302167 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
Daniel Vettera071fa02014-06-18 23:28:09 +02002168
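/*
 * Worked example of the layout above for pipe B (pipe == 1), where the
 * pipe's byte spans bits 8-15: the primary plane is bit 8, the cursor bit 9,
 * sprite 0 bit 10, and the overlay bit 15. Testing a plane's bit is then
 * just a mask check (sketch, name made up):
 */
#if 0 /* illustration only */
static inline bool sketch_frontbuffer_primary_busy(unsigned int bits,
						   enum pipe pipe)
{
	return bits & INTEL_FRONTBUFFER_PRIMARY(pipe);
}
#endif
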
Eric Anholt673a3942008-07-30 12:06:12 -07002169struct drm_i915_gem_object {
Daniel Vetterc397b902010-04-09 19:05:07 +00002170 struct drm_gem_object base;
Eric Anholt673a3942008-07-30 12:06:12 -07002171
Chris Wilson37e680a2012-06-07 15:38:42 +01002172 const struct drm_i915_gem_object_ops *ops;
2173
Ben Widawsky2f633152013-07-17 12:19:03 -07002174 /** List of VMAs backed by this object */
2175 struct list_head vma_list;
2176
Chris Wilsonc1ad11f2012-11-15 11:32:21 +00002177 /** Stolen memory for this object, instead of being backed by shmem. */
2178 struct drm_mm_node *stolen;
Ben Widawsky35c20a62013-05-31 11:28:48 -07002179 struct list_head global_list;
Eric Anholt673a3942008-07-30 12:06:12 -07002180
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02002181 /** Used in execbuf to temporarily hold a ref */
2182 struct list_head obj_exec_link;
Eric Anholt673a3942008-07-30 12:06:12 -07002183
Chris Wilson8d9d5742015-04-07 16:20:38 +01002184 struct list_head batch_pool_link;
Brad Volkin493018d2014-12-11 12:13:08 -08002185
Chris Wilson573adb32016-08-04 16:32:39 +01002186 unsigned long flags;
Eric Anholt673a3942008-07-30 12:06:12 -07002187 /**
Chris Wilson65ce3022012-07-20 12:41:02 +01002188 * This is set if the object is on the active lists (has pending
 2189	 * rendering and so a non-zero seqno), and is not set if it is on the
 2190	 * inactive (ready to be unbound) list.
Eric Anholt673a3942008-07-30 12:06:12 -07002191 */
Chris Wilson573adb32016-08-04 16:32:39 +01002192#define I915_BO_ACTIVE_SHIFT 0
2193#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
2194#define __I915_BO_ACTIVE(bo) \
2195 ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
Eric Anholt673a3942008-07-30 12:06:12 -07002196
2197 /**
2198 * This is set if the object has been written to since last bound
2199 * to the GTT
2200 */
Akshay Joshi0206e352011-08-16 15:34:10 -04002201 unsigned int dirty:1;
Daniel Vetter778c3542010-05-13 11:49:44 +02002202
2203 /**
Daniel Vetter778c3542010-05-13 11:49:44 +02002204 * Advice: are the backing pages purgeable?
2205 */
Akshay Joshi0206e352011-08-16 15:34:10 -04002206 unsigned int madv:2;
Daniel Vetter778c3542010-05-13 11:49:44 +02002207
2208 /**
Daniel Vetterfb7d5162010-10-01 22:05:20 +02002209 * Whether the current gtt mapping needs to be mappable (and isn't just
 2210	 * mappable by accident). Track pin and fault separately for a more
2211 * accurate mappable working set.
2212 */
Akshay Joshi0206e352011-08-16 15:34:10 -04002213 unsigned int fault_mappable:1;
Daniel Vetterfb7d5162010-10-01 22:05:20 +02002214
Chris Wilsoncaea7472010-11-12 13:53:37 +00002215 /*
Akash Goel24f3a8c2014-06-17 10:59:42 +05302216	 * Is the object to be mapped as read-only to the GPU?
 2217	 * Only honoured if the hardware has the relevant pte bit.
2218 */
2219 unsigned long gt_ro:1;
Chris Wilson651d7942013-08-08 14:41:10 +01002220 unsigned int cache_level:3;
Chris Wilson0f719792015-01-13 13:32:52 +00002221 unsigned int cache_dirty:1;
Chris Wilson93dfb402011-03-29 16:59:50 -07002222
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01002223 atomic_t frontbuffer_bits;
Chris Wilson50349242016-08-18 17:17:04 +01002224 unsigned int frontbuffer_ggtt_origin; /* write once */
Daniel Vettera071fa02014-06-18 23:28:09 +02002225
Chris Wilson9ad36762016-08-05 10:14:21 +01002226 /** Current tiling stride for the object, if it's tiled. */
Chris Wilson3e510a82016-08-05 10:14:23 +01002227 unsigned int tiling_and_stride;
2228#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
2229#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
2230#define STRIDE_MASK (~TILING_MASK)
Chris Wilson9ad36762016-08-05 10:14:21 +01002231
Chris Wilson15717de2016-08-04 07:52:26 +01002232 /** Count of VMA actually bound by this object */
2233 unsigned int bind_count;
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01002234 unsigned int pin_display;
2235
Chris Wilson9da3da62012-06-01 15:20:22 +01002236 struct sg_table *pages;
Chris Wilsona5570172012-09-04 21:02:54 +01002237 int pages_pin_count;
Chris Wilsonee286372015-04-07 16:20:25 +01002238 struct get_page {
2239 struct scatterlist *sg;
2240 int last;
2241 } get_page;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002242 void *mapping;
Dave Airlie9a70cc22012-05-22 13:09:21 +01002243
Chris Wilsonb4716182015-04-27 13:41:17 +01002244 /** Breadcrumb of last rendering to the buffer.
2245 * There can only be one writer, but we allow for multiple readers.
2246 * If there is a writer that necessarily implies that all other
2247 * read requests are complete - but we may only be lazily clearing
2248 * the read requests. A read request is naturally the most recent
2249 * request on a ring, so we may have two different write and read
2250 * requests on one ring where the write request is older than the
2251 * read request. This allows for the CPU to read from an active
2252 * buffer by only waiting for the write to complete.
Chris Wilson381f3712016-08-04 07:52:29 +01002253 */
2254 struct i915_gem_active last_read[I915_NUM_ENGINES];
2255 struct i915_gem_active last_write;
Eric Anholt673a3942008-07-30 12:06:12 -07002256
Daniel Vetter80075d42013-10-09 21:23:52 +02002257 /** References from framebuffers, locks out tiling changes. */
2258 unsigned long framebuffer_references;
2259
Eric Anholt280b7132009-03-12 16:56:27 -07002260 /** Record of address bit 17 of each page at last unbind. */
Chris Wilsond312ec22010-06-06 15:40:22 +01002261 unsigned long *bit_17;
Eric Anholt280b7132009-03-12 16:56:27 -07002262
Chris Wilson5cc9ed42014-05-16 14:22:37 +01002263 union {
Chris Wilson6a2c4232014-11-04 04:51:40 -08002264 /** for phy allocated objects */
2265 struct drm_dma_handle *phys_handle;
2266
Chris Wilson5cc9ed42014-05-16 14:22:37 +01002267 struct i915_gem_userptr {
2268 uintptr_t ptr;
2269 unsigned read_only :1;
2270 unsigned workers :4;
2271#define I915_GEM_USERPTR_MAX_WORKERS 15
2272
Chris Wilsonad46cb52014-08-07 14:20:40 +01002273 struct i915_mm_struct *mm;
2274 struct i915_mmu_object *mmu_object;
Chris Wilson5cc9ed42014-05-16 14:22:37 +01002275 struct work_struct *work;
2276 } userptr;
2277 };
2278};
Chris Wilson03ac0642016-07-20 13:31:51 +01002279
2280static inline struct drm_i915_gem_object *
2281to_intel_bo(struct drm_gem_object *gem)
2282{
2283 /* Assert that to_intel_bo(NULL) == NULL */
2284 BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
2285
2286 return container_of(gem, struct drm_i915_gem_object, base);
2287}
2288
2289static inline struct drm_i915_gem_object *
2290i915_gem_object_lookup(struct drm_file *file, u32 handle)
2291{
2292 return to_intel_bo(drm_gem_object_lookup(file, handle));
2293}
2294
2295__deprecated
2296extern struct drm_gem_object *
2297drm_gem_object_lookup(struct drm_file *file, u32 handle);
Daniel Vetter23010e42010-03-08 13:35:02 +01002298
Chris Wilson25dc5562016-07-20 13:31:52 +01002299__attribute__((nonnull))
2300static inline struct drm_i915_gem_object *
2301i915_gem_object_get(struct drm_i915_gem_object *obj)
2302{
2303 drm_gem_object_reference(&obj->base);
2304 return obj;
2305}
2306
2307__deprecated
2308extern void drm_gem_object_reference(struct drm_gem_object *);
2309
Chris Wilsonf8c417c2016-07-20 13:31:53 +01002310__attribute__((nonnull))
2311static inline void
2312i915_gem_object_put(struct drm_i915_gem_object *obj)
2313{
2314 drm_gem_object_unreference(&obj->base);
2315}
2316
2317__deprecated
2318extern void drm_gem_object_unreference(struct drm_gem_object *);
2319
Chris Wilson34911fd2016-07-20 13:31:54 +01002320__attribute__((nonnull))
2321static inline void
2322i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
2323{
2324 drm_gem_object_unreference_unlocked(&obj->base);
2325}
2326
2327__deprecated
2328extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
2329
Chris Wilsonb9bcd142016-06-20 15:05:51 +01002330static inline bool
2331i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
2332{
2333 return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
2334}
2335
Chris Wilson573adb32016-08-04 16:32:39 +01002336static inline unsigned long
2337i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
2338{
2339 return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
2340}
2341
2342static inline bool
2343i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
2344{
2345 return i915_gem_object_get_active(obj);
2346}
2347
2348static inline void
2349i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
2350{
2351 obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
2352}
2353
2354static inline void
2355i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
2356{
2357 obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
2358}
2359
2360static inline bool
2361i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
2362 int engine)
2363{
2364 return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
2365}
2366
Chris Wilson3e510a82016-08-05 10:14:23 +01002367static inline unsigned int
2368i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
2369{
2370 return obj->tiling_and_stride & TILING_MASK;
2371}
2372
2373static inline bool
2374i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
2375{
2376 return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
2377}
2378
2379static inline unsigned int
2380i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
2381{
2382 return obj->tiling_and_stride & STRIDE_MASK;
2383}
2384
Chris Wilson624192c2016-08-15 10:48:50 +01002385static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
2386{
2387 i915_gem_object_get(vma->obj);
2388 return vma;
2389}
2390
2391static inline void i915_vma_put(struct i915_vma *vma)
2392{
2393 lockdep_assert_held(&vma->vm->dev->struct_mutex);
2394 i915_gem_object_put(vma->obj);
2395}
2396
Dave Gordon85d12252016-05-20 11:54:06 +01002397/*
2398 * Optimised SGL iterator for GEM objects
2399 */
2400static __always_inline struct sgt_iter {
2401 struct scatterlist *sgp;
2402 union {
2403 unsigned long pfn;
2404 dma_addr_t dma;
2405 };
2406 unsigned int curr;
2407 unsigned int max;
2408} __sgt_iter(struct scatterlist *sgl, bool dma) {
2409 struct sgt_iter s = { .sgp = sgl };
2410
2411 if (s.sgp) {
2412 s.max = s.curr = s.sgp->offset;
2413 s.max += s.sgp->length;
2414 if (dma)
2415 s.dma = sg_dma_address(s.sgp);
2416 else
2417 s.pfn = page_to_pfn(sg_page(s.sgp));
2418 }
2419
2420 return s;
2421}
2422
2423/**
Dave Gordon63d15322016-05-20 11:54:07 +01002424 * __sg_next - return the next scatterlist entry in a list
2425 * @sg: The current sg entry
2426 *
2427 * Description:
2428 * If the entry is the last, return NULL; otherwise, step to the next
2429 * element in the array (@sg@+1). If that's a chain pointer, follow it;
2430 * otherwise just return the pointer to the current element.
2431 **/
2432static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2433{
2434#ifdef CONFIG_DEBUG_SG
2435 BUG_ON(sg->sg_magic != SG_MAGIC);
2436#endif
2437 return sg_is_last(sg) ? NULL :
2438 likely(!sg_is_chain(++sg)) ? sg :
2439 sg_chain_ptr(sg);
2440}
2441
2442/**
Dave Gordon85d12252016-05-20 11:54:06 +01002443 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2444 * @__dmap: DMA address (output)
2445 * @__iter: 'struct sgt_iter' (iterator state, internal)
2446 * @__sgt: sg_table to iterate over (input)
2447 */
2448#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2449 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2450 ((__dmap) = (__iter).dma + (__iter).curr); \
2451 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
Dave Gordon63d15322016-05-20 11:54:07 +01002452 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
Dave Gordon85d12252016-05-20 11:54:06 +01002453
2454/**
2455 * for_each_sgt_page - iterate over the pages of the given sg_table
2456 * @__pp: page pointer (output)
2457 * @__iter: 'struct sgt_iter' (iterator state, internal)
2458 * @__sgt: sg_table to iterate over (input)
2459 */
2460#define for_each_sgt_page(__pp, __iter, __sgt) \
2461 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2462 ((__pp) = (__iter).pfn == 0 ? NULL : \
2463 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2464 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
Dave Gordon63d15322016-05-20 11:54:07 +01002465 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
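/*
 * Illustrative sketch (assumes the object's backing pages are pinned so
 * that obj->pages is a live sg_table): count the backing pages with the
 * iterator above. for_each_sgt_dma() walks the same table but yields
 * dma_addr_t values instead of struct page pointers.
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *	unsigned int npages = 0;
 *
 *	for_each_sgt_page(page, iter, obj->pages)
 *		npages++;
 */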
Daniel Vettera071fa02014-06-18 23:28:09 +02002466
Brad Volkin351e3db2014-02-18 10:15:46 -08002467/*
2468 * A command that requires special handling by the command parser.
2469 */
2470struct drm_i915_cmd_descriptor {
2471 /*
2472 * Flags describing how the command parser processes the command.
2473 *
2474 * CMD_DESC_FIXED: The command has a fixed length if this is set,
2475 * a length mask if not set
2476 * CMD_DESC_SKIP: The command is allowed but does not follow the
2477 * standard length encoding for the opcode range in
2478 * which it falls
2479 * CMD_DESC_REJECT: The command is never allowed
2480 * CMD_DESC_REGISTER: The command should be checked against the
2481 * register whitelist for the appropriate ring
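 * CMD_DESC_BITMASK: The command is validated by masking one or more
 *                   dwords and comparing against expected values, as
 *                   described by the bits[] array below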
2482 * CMD_DESC_MASTER: The command is allowed if the submitting process
2483 * is the DRM master
2484 */
2485 u32 flags;
2486#define CMD_DESC_FIXED (1<<0)
2487#define CMD_DESC_SKIP (1<<1)
2488#define CMD_DESC_REJECT (1<<2)
2489#define CMD_DESC_REGISTER (1<<3)
2490#define CMD_DESC_BITMASK (1<<4)
2491#define CMD_DESC_MASTER (1<<5)
2492
2493 /*
2494 * The command's unique identification bits and the bitmask to get them.
2495 * This isn't strictly the opcode field as defined in the spec and may
2496 * also include type, subtype, and/or subop fields.
2497 */
2498 struct {
2499 u32 value;
2500 u32 mask;
2501 } cmd;
2502
2503 /*
2504 * The command's length. The command is either fixed length (i.e. does
2505 * not include a length field) or has a length field mask. The flag
2506 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
2507 * a length mask. All command entries in a command table must include
2508 * length information.
2509 */
2510 union {
2511 u32 fixed;
2512 u32 mask;
2513 } length;
2514
2515 /*
2516 * Describes where to find a register address in the command to check
2517 * against the ring's register whitelist. Only valid if flags has the
2518 * CMD_DESC_REGISTER bit set.
Francisco Jerez6a65c5b2015-05-29 16:44:13 +03002519 *
2520 * A non-zero step value implies that the command may access multiple
2521 * registers in sequence (e.g. LRI), in that case step gives the
2522 * distance in dwords between individual offset fields.
Brad Volkin351e3db2014-02-18 10:15:46 -08002523 */
2524 struct {
2525 u32 offset;
2526 u32 mask;
Francisco Jerez6a65c5b2015-05-29 16:44:13 +03002527 u32 step;
Brad Volkin351e3db2014-02-18 10:15:46 -08002528 } reg;
2529
2530#define MAX_CMD_DESC_BITMASKS 3
2531 /*
2532 * Describes command checks where a particular dword is masked and
2533 * compared against an expected value. If the command does not match
2534 * the expected value, the parser rejects it. Only valid if flags has
2535 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
2536 * are valid.
Brad Volkind4d48032014-02-18 10:15:54 -08002537 *
2538 * If the check specifies a non-zero condition_mask then the parser
2539 * only performs the check when the bits specified by condition_mask
2540 * are non-zero.
Brad Volkin351e3db2014-02-18 10:15:46 -08002541 */
2542 struct {
2543 u32 offset;
2544 u32 mask;
2545 u32 expected;
Brad Volkind4d48032014-02-18 10:15:54 -08002546 u32 condition_offset;
2547 u32 condition_mask;
Brad Volkin351e3db2014-02-18 10:15:46 -08002548 } bits[MAX_CMD_DESC_BITMASKS];
2549};
2550
2551/*
2552 * A table of commands requiring special handling by the command parser.
2553 *
Chris Wilson33a051a2016-07-27 09:07:26 +01002554 * Each engine has an array of tables. Each table consists of an array of
2555 * command descriptors, which must be sorted with command opcodes in
2556 * ascending order.
Brad Volkin351e3db2014-02-18 10:15:46 -08002557 */
2558struct drm_i915_cmd_table {
2559 const struct drm_i915_cmd_descriptor *table;
2560 int count;
2561};
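/*
 * Illustrative example (hypothetical opcodes, not a real table): two
 * fixed-length descriptors sorted by opcode, wrapped in a table.
 *
 *	static const struct drm_i915_cmd_descriptor example_cmds[] = {
 *		{ .flags = CMD_DESC_FIXED | CMD_DESC_SKIP,
 *		  .cmd = { .value = 0x00 << 23, .mask = 0xff << 23 },
 *		  .length = { .fixed = 1 } },
 *		{ .flags = CMD_DESC_FIXED | CMD_DESC_REJECT,
 *		  .cmd = { .value = 0x20 << 23, .mask = 0xff << 23 },
 *		  .length = { .fixed = 1 } },
 *	};
 *	static const struct drm_i915_cmd_table example_table = {
 *		.table = example_cmds,
 *		.count = ARRAY_SIZE(example_cmds),
 *	};
 */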
2562
Chris Wilsondbbe9122014-08-09 19:18:43 +01002563/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
Chris Wilson7312e2d2014-08-13 12:14:12 +01002564#define __I915__(p) ({ \
2565 struct drm_i915_private *__p; \
2566 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
2567 __p = (struct drm_i915_private *)p; \
2568 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
2569 __p = to_i915((struct drm_device *)p); \
2570 else \
2571 BUILD_BUG(); \
2572 __p; \
2573})
David Weinehall351c3b52016-08-22 13:32:41 +03002574#define INTEL_INFO(p) (&__I915__(p)->info)
Jani Nikula3f10e822016-04-07 12:48:17 +03002575#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
Chris Wilson87f1f462014-08-09 19:18:42 +01002576#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
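/*
 * Example (illustrative): the type dispatch in __I915__() lets these
 * macros accept either a struct drm_i915_private * or a
 * struct drm_device *:
 *
 *	INTEL_GEN(dev_priv);
 *	INTEL_GEN(&dev_priv->drm);
 */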
Zou Nan haicae58522010-11-09 17:17:32 +08002577
Jani Nikulae87a0052015-10-20 15:22:02 +03002578#define REVID_FOREVER 0xff
Chris Wilson091387c2016-06-24 14:00:21 +01002579#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01002580
2581#define GEN_FOREVER (0)
2582/*
2583 * Returns true if Gen is in inclusive range [Start, End].
2584 *
2585 * Use GEN_FOREVER for an unbounded start and/or end.
2586 */
2587#define IS_GEN(p, s, e) ({ \
2588 unsigned int __s = (s), __e = (e); \
2589 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2590 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2591 if ((__s) != GEN_FOREVER) \
2592 __s = (s) - 1; \
2593 if ((__e) == GEN_FOREVER) \
2594 __e = BITS_PER_LONG - 1; \
2595 else \
2596 __e = (e) - 1; \
2597 !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
2598})
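/*
 * Example usage (illustrative):
 *
 *	IS_GEN(dev_priv, 8, GEN_FOREVER)	gen8 and all later platforms
 *	IS_GEN(dev_priv, GEN_FOREVER, 5)	gen5 and all earlier platforms
 *	IS_GEN(dev_priv, 7, 7)			gen7 only
 */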
2599
Jani Nikulae87a0052015-10-20 15:22:02 +03002600/*
2601 * Return true if revision is in range [since,until] inclusive.
2602 *
2603 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2604 */
2605#define IS_REVID(p, since, until) \
2606 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2607
Chris Wilson87f1f462014-08-09 19:18:42 +01002608#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
2609#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
Zou Nan haicae58522010-11-09 17:17:32 +08002610#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
Chris Wilson87f1f462014-08-09 19:18:42 +01002611#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
Zou Nan haicae58522010-11-09 17:17:32 +08002612#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
Chris Wilson87f1f462014-08-09 19:18:42 +01002613#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
2614#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
Zou Nan haicae58522010-11-09 17:17:32 +08002615#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
2616#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
2617#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
Chris Wilson87f1f462014-08-09 19:18:42 +01002618#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
Zou Nan haicae58522010-11-09 17:17:32 +08002619#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
Chris Wilson87f1f462014-08-09 19:18:42 +01002620#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
2621#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
Zou Nan haicae58522010-11-09 17:17:32 +08002622#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
2623#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
Chris Wilson87f1f462014-08-09 19:18:42 +01002624#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
Jesse Barnes4b651772011-04-28 14:33:09 -07002625#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
Chris Wilson87f1f462014-08-09 19:18:42 +01002626#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
2627 INTEL_DEVID(dev) == 0x0152 || \
2628 INTEL_DEVID(dev) == 0x015a)
Jesse Barnes70a3eb72012-03-28 13:39:21 -07002629#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
Wayne Boyer666a4532015-12-09 12:29:35 -08002630#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
Eugeni Dodonov4cae9ae2012-03-29 12:32:18 -03002631#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
Tvrtko Ursulinab0d24a2016-05-10 10:57:05 +01002632#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
Satheeshakrishna M7201c0b2014-04-02 11:24:50 +05302633#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
Rodrigo Vivi7526ac12015-10-27 10:14:54 -07002634#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07002635#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
Zou Nan haicae58522010-11-09 17:17:32 +08002636#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
Paulo Zanonied1c9e22013-08-12 14:34:08 -03002637#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
Chris Wilson87f1f462014-08-09 19:18:42 +01002638 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
Ben Widawsky5dd8c4c2013-11-08 10:20:06 -08002639#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
Rodrigo Vivi6b96d702015-01-19 16:16:15 -08002640 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
Rodrigo Vivi0dc6f202015-01-21 11:46:32 -08002641 (INTEL_DEVID(dev) & 0xf) == 0xb || \
Chris Wilson87f1f462014-08-09 19:18:42 +01002642 (INTEL_DEVID(dev) & 0xf) == 0xe))
Ville Syrjäläebb72aa2015-06-03 15:45:12 +03002643/* ULX machines are also considered ULT. */
2644#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
2645 (INTEL_DEVID(dev) & 0xf) == 0xe)
Rodrigo Vivia0fcbd92014-09-19 20:16:26 -04002646#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
2647 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
Ben Widawsky5dd8c4c2013-11-08 10:20:06 -08002648#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
Chris Wilson87f1f462014-08-09 19:18:42 +01002649 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
Rodrigo Vivi94353732013-08-28 16:45:46 -03002650#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
Chris Wilson87f1f462014-08-09 19:18:42 +01002651 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
Paulo Zanoni9bbfd202014-04-29 11:00:22 -03002652/* ULX machines are also considered ULT. */
Chris Wilson87f1f462014-08-09 19:18:42 +01002653#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
2654 INTEL_DEVID(dev) == 0x0A1E)
David Weinehallf8896f52015-06-25 11:11:03 +03002655#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
2656 INTEL_DEVID(dev) == 0x1913 || \
2657 INTEL_DEVID(dev) == 0x1916 || \
2658 INTEL_DEVID(dev) == 0x1921 || \
2659 INTEL_DEVID(dev) == 0x1926)
2660#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
2661 INTEL_DEVID(dev) == 0x1915 || \
2662 INTEL_DEVID(dev) == 0x191E)
Rodrigo Vivia5b79912015-12-08 16:58:37 -08002663#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
2664 INTEL_DEVID(dev) == 0x5913 || \
2665 INTEL_DEVID(dev) == 0x5916 || \
2666 INTEL_DEVID(dev) == 0x5921 || \
2667 INTEL_DEVID(dev) == 0x5926)
2668#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
2669 INTEL_DEVID(dev) == 0x5915 || \
2670 INTEL_DEVID(dev) == 0x591E)
Sagar Arun Kamble7a58bad2015-09-12 10:17:50 +05302671#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
2672 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2673#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
2674 (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
2675
Ben Widawskyb833d682013-08-23 16:00:07 -07002676#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
Zou Nan haicae58522010-11-09 17:17:32 +08002677
Jani Nikulaef712bb2015-10-20 15:22:00 +03002678#define SKL_REVID_A0 0x0
2679#define SKL_REVID_B0 0x1
2680#define SKL_REVID_C0 0x2
2681#define SKL_REVID_D0 0x3
2682#define SKL_REVID_E0 0x4
2683#define SKL_REVID_F0 0x5
Mika Kuoppala4ba9c1f2016-07-20 14:26:12 +03002684#define SKL_REVID_G0 0x6
2685#define SKL_REVID_H0 0x7
Hoath, Nicholase90a21d2015-02-05 10:47:17 +00002686
Jani Nikulae87a0052015-10-20 15:22:02 +03002687#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
2688
Jani Nikulaef712bb2015-10-20 15:22:00 +03002689#define BXT_REVID_A0 0x0
Jani Nikulafffda3f2015-10-20 15:22:01 +03002690#define BXT_REVID_A1 0x1
Jani Nikulaef712bb2015-10-20 15:22:00 +03002691#define BXT_REVID_B0 0x3
2692#define BXT_REVID_C0 0x9
Nick Hoath6c74c872015-03-20 09:03:52 +00002693
Jani Nikulae87a0052015-10-20 15:22:02 +03002694#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
2695
Mika Kuoppalac033a372016-06-07 17:18:55 +03002696#define KBL_REVID_A0 0x0
2697#define KBL_REVID_B0 0x1
Mika Kuoppalafe905812016-06-07 17:19:03 +03002698#define KBL_REVID_C0 0x2
2699#define KBL_REVID_D0 0x3
2700#define KBL_REVID_E0 0x4
Mika Kuoppalac033a372016-06-07 17:18:55 +03002701
2702#define IS_KBL_REVID(p, since, until) \
2703 (IS_KABYLAKE(p) && IS_REVID(p, since, until))
2704
Jesse Barnes85436692011-04-06 12:11:14 -07002705/*
2706 * The genX designation typically refers to the render engine, so render
2707 * capability related checks should use IS_GEN, while display and other checks
2708 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2709 * chips, etc.).
2710 */
Tvrtko Ursulinaf1346a2016-07-04 15:50:23 +01002711#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
2712#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
2713#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
2714#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
2715#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
2716#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
2717#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
2718#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
Zou Nan haicae58522010-11-09 17:17:32 +08002719
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002720#define ENGINE_MASK(id) BIT(id)
2721#define RENDER_RING ENGINE_MASK(RCS)
2722#define BSD_RING ENGINE_MASK(VCS)
2723#define BLT_RING ENGINE_MASK(BCS)
2724#define VEBOX_RING ENGINE_MASK(VECS)
2725#define BSD2_RING ENGINE_MASK(VCS2)
2726#define ALL_ENGINES (~0)
Mika Kuoppalaee4b6fa2016-03-16 17:54:00 +02002727
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002728#define HAS_ENGINE(dev_priv, id) \
Tvrtko Ursulinaf1346a2016-07-04 15:50:23 +01002729 (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002730
2731#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
2732#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
2733#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
2734#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
2735
Ben Widawsky63c42e52014-04-18 18:04:27 -03002736#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
Tvrtko Ursulinca377802016-03-02 12:10:31 +00002737#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
Tvrtko Ursulinaf1346a2016-07-04 15:50:23 +01002738#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
Ben Widawsky63c42e52014-04-18 18:04:27 -03002739#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002740 HAS_EDRAM(dev))
Zou Nan haicae58522010-11-09 17:17:32 +08002741#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
2742
Ben Widawsky254f9652012-06-04 14:42:42 -07002743#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
Oscar Mateod7f621e2014-07-24 17:04:49 +01002744#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
Jesse Barnes692ef702014-08-05 07:51:18 -07002745#define USES_PPGTT(dev) (i915.enable_ppgtt)
Michel Thierry81ba8aef2015-08-03 09:52:01 +01002746#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
2747#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
Daniel Vetter1d2a3142012-02-09 17:15:46 +01002748
Chris Wilson05394f32010-11-08 19:18:58 +00002749#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
Zou Nan haicae58522010-11-09 17:17:32 +08002750#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
2751
Daniel Vetterb45305f2012-12-17 16:21:27 +01002752/* Early gen2 have a totally busted CS tlb and require pinned batches. */
2753#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
Mika Kuoppala06e668a2015-12-16 19:18:37 +02002754
2755/* WaRsDisableCoarsePowerGating:skl,bxt */
Tvrtko Ursulin61251512016-06-21 15:07:14 +01002756#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2757 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
2758 IS_SKL_GT3(dev_priv) || \
2759 IS_SKL_GT4(dev_priv))
Mika Kuoppala185c66e2016-04-05 15:56:16 +03002760
Daniel Vetter4e6b7882014-02-07 16:33:20 +01002761/*
2762 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
2763 * even when in MSI mode. This results in spurious interrupt warnings if the
2764 * legacy irq no. is shared with another device. The kernel then disables that
2765 * interrupt source and so prevents the other device from working properly.
2766 */
2767#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
2768#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
Daniel Vetterb45305f2012-12-17 16:21:27 +01002769
Zou Nan haicae58522010-11-09 17:17:32 +08002770/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2771 * rows, which changed the alignment requirements and fence programming.
2772 */
2773#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
2774 IS_I915GM(dev)))
Zou Nan haicae58522010-11-09 17:17:32 +08002775#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
2776#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
Zou Nan haicae58522010-11-09 17:17:32 +08002777
2778#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
2779#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01002780#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
Zou Nan haicae58522010-11-09 17:17:32 +08002781
Damien Lespiaudbf77862014-10-01 20:04:14 +01002782#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
Damien Lespiauf5adf942013-06-24 18:29:34 +01002783
Jani Nikula0c9b3712015-05-18 17:10:01 +03002784#define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
2785 INTEL_INFO(dev)->gen >= 9)
2786
Damien Lespiaudd93be52013-04-22 18:40:39 +01002787#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
Damien Lespiau30568c42013-04-22 18:40:41 +01002788#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
Carlos Santa6e3b84d2016-08-17 12:30:36 -07002789#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
Carlos Santa4aa4c232016-08-17 12:30:39 -07002790#define HAS_RUNTIME_PM(dev) (INTEL_INFO(dev)->has_runtime_pm)
Rodrigo Vivi58abf1d2014-10-07 07:06:50 -07002791#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01002792#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
Paulo Zanoniaffa9352012-11-23 15:30:39 -02002793
Animesh Manna7b403ff2015-08-04 22:02:42 +05302794#define HAS_CSR(dev) (IS_GEN9(dev))
Daniel Vettereb805622015-05-04 14:58:44 +02002795
Dave Gordon1a3d1892016-05-13 15:36:30 +01002796/*
2797 * For now, anything with a GuC requires uCode loading, and then supports
2798 * command submission once loaded. But these are logically independent
2799 * properties, so we have separate macros to test them.
2800 */
Peter Antoine6f8be282016-06-30 09:37:51 -07002801#define HAS_GUC(dev) (IS_GEN9(dev))
Dave Gordon1a3d1892016-05-13 15:36:30 +01002802#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
2803#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
Alex Dai33a732f2015-08-12 15:43:36 +01002804
Abdiel Janulguea9ed33c2015-07-01 10:12:23 +03002805#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
2806 INTEL_INFO(dev)->gen >= 8)
2807
Akash Goel97d33082015-06-29 14:50:23 +05302808#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
Wayne Boyer666a4532015-12-09 12:29:35 -08002809 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
2810 !IS_BROXTON(dev))
Akash Goel97d33082015-06-29 14:50:23 +05302811
arun.siluvery@linux.intel.com33e141e2016-06-03 06:34:33 +01002812#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
2813
Paulo Zanoni17a303e2012-11-20 15:12:07 -02002814#define INTEL_PCH_DEVICE_ID_MASK 0xff00
2815#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2816#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
2817#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
2818#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
2819#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
Satheeshakrishna Me7e7ea22014-04-09 11:08:57 +05302820#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2821#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
Rodrigo Vivi22dea0b2016-07-01 17:07:12 -07002822#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
Robert Beckett30c964a2015-08-28 13:10:22 +01002823#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
Jesse Barnes1844a662016-03-16 13:31:30 -07002824#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
Gerd Hoffmann39bfcd522015-11-26 12:03:51 +01002825#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
Paulo Zanoni17a303e2012-11-20 15:12:07 -02002826
Chris Wilsonf2fbc692014-08-24 19:35:31 +01002827#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
Rodrigo Vivi22dea0b2016-07-01 17:07:12 -07002828#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
Satheeshakrishna Me7e7ea22014-04-09 11:08:57 +05302829#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
Eugeni Dodonoveb877eb2012-03-29 12:32:20 -03002830#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
Ville Syrjäläc2699522015-08-27 23:55:59 +03002831#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
Ville Syrjälä56f5f702015-11-30 16:23:44 +02002832#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
Zou Nan haicae58522010-11-09 17:17:32 +08002833#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
2834#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
Ben Widawsky40c7ead2013-04-05 13:12:40 -07002835#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
Paulo Zanoni45e6e3a2012-07-03 15:57:32 -03002836#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
Zou Nan haicae58522010-11-09 17:17:32 +08002837
Wayne Boyer666a4532015-12-09 12:29:35 -08002838#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
2839 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Sonika Jindal5fafe292014-07-21 15:23:38 +05302840
Ben Widawsky040d2ba2013-09-19 11:01:40 -07002841/* DPF == dynamic parity feature */
2842#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2843#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
Ben Widawskye1ef7cc2012-07-24 20:47:31 -07002844
Ben Widawskyc8735b02012-09-07 19:43:39 -07002845#define GT_FREQUENCY_MULTIPLIER 50
Akash Goelde43ae92015-03-06 11:07:14 +05302846#define GEN9_FREQ_SCALER 3
Ben Widawskyc8735b02012-09-07 19:43:39 -07002847
Chris Wilson05394f32010-11-08 19:18:58 +00002848#include "i915_trace.h"
2849
Chris Wilson48f112f2016-06-24 14:07:14 +01002850static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2851{
2852#ifdef CONFIG_INTEL_IOMMU
2853 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
2854 return true;
2855#endif
2856 return false;
2857}
2858
Maarten Lankhorst1751fcf2015-08-27 15:15:15 +02002859extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2860extern int i915_resume_switcheroo(struct drm_device *dev);
Dave Airlie7c1c2872008-11-28 14:22:24 +10002861
Chris Wilsonc0336662016-05-06 15:40:21 +01002862int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
David Weinehall351c3b52016-08-22 13:32:41 +03002863 int enable_ppgtt);
Chris Wilson0e4ca102016-04-29 13:18:22 +01002864
Chris Wilson39df9192016-07-20 13:31:57 +01002865bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
2866
Chris Wilson0673ad42016-06-24 14:00:22 +01002867/* i915_drv.c */
Imre Deakd15d7532016-03-18 10:46:10 +02002868void __printf(3, 4)
2869__i915_printk(struct drm_i915_private *dev_priv, const char *level,
2870 const char *fmt, ...);
2871
2872#define i915_report_error(dev_priv, fmt, ...) \
2873 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2874
Ben Widawskyc43b5632012-04-16 14:07:40 -07002875#ifdef CONFIG_COMPAT
Dave Airlie0d6aa602006-01-02 20:14:23 +11002876extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2877 unsigned long arg);
Ben Widawskyc43b5632012-04-16 14:07:40 -07002878#endif
Chris Wilsondc979972016-05-10 14:10:04 +01002879extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2880extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
Chris Wilsonc0336662016-05-06 15:40:21 +01002881extern int i915_reset(struct drm_i915_private *dev_priv);
Arun Siluvery6b332fa2016-04-04 18:50:56 +01002882extern int intel_guc_reset(struct drm_i915_private *dev_priv);
Tomas Elffc0768c2016-03-21 16:26:59 +00002883extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
Jesse Barnes7648fa92010-05-20 14:28:11 -07002884extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2885extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2886extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2887extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
Imre Deak650ad972014-04-18 16:35:02 +03002888int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
Jesse Barnes7648fa92010-05-20 14:28:11 -07002889
Jani Nikula77913b32015-06-18 13:06:16 +03002890/* intel_hotplug.c */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002891void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2892 u32 pin_mask, u32 long_mask);
Jani Nikula77913b32015-06-18 13:06:16 +03002893void intel_hpd_init(struct drm_i915_private *dev_priv);
2894void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2895void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
Imre Deakcc24fcd2015-07-21 15:32:45 -07002896bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
Lyudeb236d7c82016-06-21 17:03:43 -04002897bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2898void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
Jani Nikula77913b32015-06-18 13:06:16 +03002899
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900/* i915_irq.c */
Chris Wilson26a02b82016-07-01 17:23:13 +01002901static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
2902{
2903 unsigned long delay;
2904
2905 if (unlikely(!i915.enable_hangcheck))
2906 return;
2907
2908 /* Don't continually defer the hangcheck so that it is always run at
2909 * least once after work has been scheduled on any ring. Otherwise,
2910 * we will ignore a hung ring if a second ring is kept busy.
2911 */
2912
2913 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
2914 queue_delayed_work(system_long_wq,
2915 &dev_priv->gpu_error.hangcheck_work, delay);
2916}
2917
Mika Kuoppala58174462014-02-25 17:11:26 +02002918__printf(3, 4)
Chris Wilsonc0336662016-05-06 15:40:21 +01002919void i915_handle_error(struct drm_i915_private *dev_priv,
2920 u32 engine_mask,
Mika Kuoppala58174462014-02-25 17:11:26 +02002921 const char *fmt, ...);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922
Daniel Vetterb9632912014-09-30 10:56:44 +02002923extern void intel_irq_init(struct drm_i915_private *dev_priv);
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02002924int intel_irq_install(struct drm_i915_private *dev_priv);
2925void intel_irq_uninstall(struct drm_i915_private *dev_priv);
Chris Wilson907b28c2013-07-19 20:36:52 +01002926
Chris Wilsondc979972016-05-10 14:10:04 +01002927extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
2928extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
Imre Deak10018602014-06-06 12:59:39 +03002929 bool restore_forcewake);
Chris Wilsondc979972016-05-10 14:10:04 +01002930extern void intel_uncore_init(struct drm_i915_private *dev_priv);
Mika Kuoppalafc976182015-12-15 16:25:07 +02002931extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
Mika Kuoppalabc3b9342016-01-08 15:51:20 +02002932extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
Chris Wilsondc979972016-05-10 14:10:04 +01002933extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
2934extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
2935 bool restore);
Mika Kuoppala48c10262015-01-16 11:34:41 +02002936const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
Mika Kuoppala59bad942015-01-16 11:34:40 +02002937void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
Mika Kuoppala48c10262015-01-16 11:34:41 +02002938 enum forcewake_domains domains);
Mika Kuoppala59bad942015-01-16 11:34:40 +02002939void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
Mika Kuoppala48c10262015-01-16 11:34:41 +02002940 enum forcewake_domains domains);
Chris Wilsona6111f72015-04-07 16:21:02 +01002941/* Like above but the caller must manage the uncore.lock itself.
2942 * Must be used with I915_READ_FW and friends.
2943 */
2944void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
2945 enum forcewake_domains domains);
2946void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
2947 enum forcewake_domains domains);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002948u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
2949
Mika Kuoppala59bad942015-01-16 11:34:40 +02002950void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
Zhi Wang0ad35fe2016-06-16 08:07:00 -04002951
Chris Wilson1758b902016-06-30 15:32:44 +01002952int intel_wait_for_register(struct drm_i915_private *dev_priv,
2953 i915_reg_t reg,
2954 const u32 mask,
2955 const u32 value,
2956 const unsigned long timeout_ms);
2957int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
2958 i915_reg_t reg,
2959 const u32 mask,
2960 const u32 value,
2961 const unsigned long timeout_ms);
2962
Zhi Wang0ad35fe2016-06-16 08:07:00 -04002963static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
2964{
2965 return dev_priv->gvt.initialized;
2966}
2967
Chris Wilsonc0336662016-05-06 15:40:21 +01002968static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
Yu Zhangcf9d2892015-02-10 19:05:47 +08002969{
Chris Wilsonc0336662016-05-06 15:40:21 +01002970 return dev_priv->vgpu.active;
Yu Zhangcf9d2892015-02-10 19:05:47 +08002971}
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002972
Keith Packard7c463582008-11-04 02:03:27 -08002973void
Jani Nikula50227e12014-03-31 14:27:21 +03002974i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002975 u32 status_mask);
Keith Packard7c463582008-11-04 02:03:27 -08002976
2977void
Jani Nikula50227e12014-03-31 14:27:21 +03002978i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002979 u32 status_mask);
Keith Packard7c463582008-11-04 02:03:27 -08002980
Imre Deakf8b79e52014-03-04 19:23:07 +02002981void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
2982void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
Egbert Eich0706f172015-09-23 16:15:27 +02002983void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
2984 uint32_t mask,
2985 uint32_t bits);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02002986void ilk_update_display_irq(struct drm_i915_private *dev_priv,
2987 uint32_t interrupt_mask,
2988 uint32_t enabled_irq_mask);
2989static inline void
2990ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
2991{
2992 ilk_update_display_irq(dev_priv, bits, bits);
2993}
2994static inline void
2995ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
2996{
2997 ilk_update_display_irq(dev_priv, bits, 0);
2998}
Ville Syrjälä013d3752015-11-23 18:06:17 +02002999void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
3000 enum pipe pipe,
3001 uint32_t interrupt_mask,
3002 uint32_t enabled_irq_mask);
3003static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
3004 enum pipe pipe, uint32_t bits)
3005{
3006 bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
3007}
3008static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
3009 enum pipe pipe, uint32_t bits)
3010{
3011 bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
3012}
Daniel Vetter47339cd2014-09-30 10:56:46 +02003013void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
3014 uint32_t interrupt_mask,
3015 uint32_t enabled_irq_mask);
Ville Syrjälä14443262015-11-23 18:06:15 +02003016static inline void
3017ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3018{
3019 ibx_display_interrupt_update(dev_priv, bits, bits);
3020}
3021static inline void
3022ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3023{
3024 ibx_display_interrupt_update(dev_priv, bits, 0);
3025}
3026
Eric Anholt673a3942008-07-30 12:06:12 -07003027/* i915_gem.c */
Eric Anholt673a3942008-07-30 12:06:12 -07003028int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3029 struct drm_file *file_priv);
3030int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
3031 struct drm_file *file_priv);
3032int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
3033 struct drm_file *file_priv);
3034int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3035 struct drm_file *file_priv);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003036int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
3037 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003038int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
3039 struct drm_file *file_priv);
3040int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
3041 struct drm_file *file_priv);
3042int i915_gem_execbuffer(struct drm_device *dev, void *data,
3043 struct drm_file *file_priv);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003044int i915_gem_execbuffer2(struct drm_device *dev, void *data,
3045 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003046int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3047 struct drm_file *file_priv);
Ben Widawsky199adf42012-09-21 17:01:20 -07003048int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3049 struct drm_file *file);
3050int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3051 struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07003052int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3053 struct drm_file *file_priv);
Chris Wilson3ef94da2009-09-14 16:50:29 +01003054int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3055 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003056int i915_gem_set_tiling(struct drm_device *dev, void *data,
3057 struct drm_file *file_priv);
3058int i915_gem_get_tiling(struct drm_device *dev, void *data,
3059 struct drm_file *file_priv);
Chris Wilson72778cb2016-05-19 16:17:16 +01003060void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
Chris Wilson5cc9ed42014-05-16 14:22:37 +01003061int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3062 struct drm_file *file);
Eric Anholt5a125c32008-10-22 21:40:13 -07003063int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
3064 struct drm_file *file_priv);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003065int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
3066 struct drm_file *file_priv);
Imre Deakd64aa092016-01-19 15:26:29 +02003067void i915_gem_load_init(struct drm_device *dev);
3068void i915_gem_load_cleanup(struct drm_device *dev);
Imre Deak40ae4e12016-03-16 14:54:03 +02003069void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
Chris Wilson461fb992016-05-14 07:26:33 +01003070int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3071
Chris Wilson42dcedd2012-11-15 11:32:30 +00003072void *i915_gem_object_alloc(struct drm_device *dev);
3073void i915_gem_object_free(struct drm_i915_gem_object *obj);
Chris Wilson37e680a2012-06-07 15:38:42 +01003074void i915_gem_object_init(struct drm_i915_gem_object *obj,
3075 const struct drm_i915_gem_object_ops *ops);
Dave Gordond37cd8a2016-04-22 19:14:32 +01003076struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003077 size_t size);
Dave Gordonea702992015-07-09 19:29:02 +01003078struct drm_i915_gem_object *i915_gem_object_create_from_data(
3079 struct drm_device *dev, const void *data, size_t size);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003080void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07003081void i915_gem_free_object(struct drm_gem_object *obj);
Chris Wilson42dcedd2012-11-15 11:32:30 +00003082
Chris Wilson058d88c2016-08-15 10:49:06 +01003083struct i915_vma * __must_check
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003084i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3085 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01003086 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01003087 u64 alignment,
3088 u64 flags);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003089
3090int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3091 u32 flags);
Chris Wilsond0710ab2015-11-20 14:16:39 +00003092void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003093int __must_check i915_vma_unbind(struct i915_vma *vma);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003094void i915_vma_close(struct i915_vma *vma);
3095void i915_vma_destroy(struct i915_vma *vma);
Chris Wilsonaa653a62016-08-04 07:52:27 +01003096
3097int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
Chris Wilsondd624af2013-01-15 12:39:35 +00003098int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
Paulo Zanoni48018a52013-12-13 15:22:31 -02003099void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
Chris Wilson05394f32010-11-08 19:18:58 +00003100void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003101
Chris Wilson37e680a2012-06-07 15:38:42 +01003102int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
Chris Wilsonee286372015-04-07 16:20:25 +01003103
3104static inline int __sg_page_count(struct scatterlist *sg)
Chris Wilson9da3da62012-06-01 15:20:22 +01003105{
Chris Wilsonee286372015-04-07 16:20:25 +01003106 return sg->length >> PAGE_SHIFT;
Chris Wilson9da3da62012-06-01 15:20:22 +01003107}
Chris Wilsonee286372015-04-07 16:20:25 +01003108
Dave Gordon033908a2015-12-10 18:51:23 +00003109struct page *
3110i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
3111
Chris Wilson341be1c2016-06-10 14:23:00 +05303112static inline dma_addr_t
3113i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
3114{
3115 if (n < obj->get_page.last) {
3116 obj->get_page.sg = obj->pages->sgl;
3117 obj->get_page.last = 0;
3118 }
3119
3120 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
3121 obj->get_page.last += __sg_page_count(obj->get_page.sg++);
3122 if (unlikely(sg_is_chain(obj->get_page.sg)))
3123 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
3124 }
3125
3126 return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
3127}
3128
Chris Wilsonee286372015-04-07 16:20:25 +01003129static inline struct page *
3130i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
3131{
3132 if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
3133 return NULL;
3134
3135 if (n < obj->get_page.last) {
3136 obj->get_page.sg = obj->pages->sgl;
3137 obj->get_page.last = 0;
3138 }
3139
3140 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
3141 obj->get_page.last += __sg_page_count(obj->get_page.sg++);
3142 if (unlikely(sg_is_chain(obj->get_page.sg)))
3143 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
3144 }
3145
3146 return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
3147}
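/*
 * Both lookup helpers above cache the last scatterlist position in
 * obj->get_page, so walking pages in ascending index order costs O(1)
 * per step. Illustrative sketch (process_page() is hypothetical, and
 * the pages are assumed to be pinned):
 *
 *	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++)
 *		process_page(i915_gem_object_get_page(obj, i));
 */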
3148
Chris Wilsona5570172012-09-04 21:02:54 +01003149static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3150{
3151 BUG_ON(obj->pages == NULL);
3152 obj->pages_pin_count++;
3153}
Chris Wilson0a798eb2016-04-08 12:11:11 +01003154
Chris Wilsona5570172012-09-04 21:02:54 +01003155static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3156{
3157 BUG_ON(obj->pages_pin_count == 0);
3158 obj->pages_pin_count--;
3159}
3160
Chris Wilsond31d7cb2016-08-12 12:39:58 +01003161enum i915_map_type {
3162 I915_MAP_WB = 0,
3163 I915_MAP_WC,
3164};
3165
Chris Wilson0a798eb2016-04-08 12:11:11 +01003166/**
3167 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
3168 * @obj: the object to map into kernel address space
Chris Wilsond31d7cb2016-08-12 12:39:58 +01003169 * @type: the type of mapping, used to select pgprot_t
Chris Wilson0a798eb2016-04-08 12:11:11 +01003170 *
3171 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
3172 * pages and then returns a contiguous mapping of the backing storage into
Chris Wilsond31d7cb2016-08-12 12:39:58 +01003173 * the kernel address space. Based on the @type of mapping, the PTE will be
3174 * set to either WriteBack or WriteCombine (via pgprot_t).
Chris Wilson0a798eb2016-04-08 12:11:11 +01003175 *
Dave Gordon83052162016-04-12 14:46:16 +01003176 * The caller must hold the struct_mutex, and is responsible for calling
3177 * i915_gem_object_unpin_map() when the mapping is no longer required.
Chris Wilson0a798eb2016-04-08 12:11:11 +01003178 *
Dave Gordon83052162016-04-12 14:46:16 +01003179 * Returns the pointer through which to access the mapped object, or an
3180 * ERR_PTR() on error.
Chris Wilson0a798eb2016-04-08 12:11:11 +01003181 */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01003182void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
3183 enum i915_map_type type);
Chris Wilson0a798eb2016-04-08 12:11:11 +01003184
3185/**
3186 * i915_gem_object_unpin_map - releases an earlier mapping
3187 * @obj - the object to unmap
3188 *
3189 * After pinning the object and mapping its pages, once you are finished
3190 * with your access, call i915_gem_object_unpin_map() to release the pin
3191 * on the mapping. Once the pin count reaches zero, that mapping may be
3192 * removed.
3193 *
3194 * The caller must hold the struct_mutex.
3195 */
3196static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
3197{
3198 lockdep_assert_held(&obj->base.dev->struct_mutex);
3199 i915_gem_object_unpin_pages(obj);
3200}
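/*
 * Illustrative usage (a sketch: assumes struct_mutex is held and that a
 * cached WriteBack mapping is acceptable for this object):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */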
3201
Chris Wilson43394c72016-08-18 17:16:47 +01003202int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
3203 unsigned int *needs_clflush);
3204int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
3205 unsigned int *needs_clflush);
3206#define CLFLUSH_BEFORE 0x1
3207#define CLFLUSH_AFTER 0x2
3208#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
3209
3210static inline void
3211i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
3212{
3213 i915_gem_object_unpin_pages(obj);
3214}
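/*
 * Illustrative read path (a sketch, not the real pread code): prepare
 * the pages, flush CPU caches first if needs_clflush says so, then drop
 * the page pin.
 *
 *	unsigned int needs_clflush;
 *	int ret;
 *
 *	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *	if (ret)
 *		return ret;
 *	... copy out of the shmem pages ...
 *	i915_gem_obj_finish_shmem_access(obj);
 */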
3215
Chris Wilson54cf91d2010-11-25 18:00:26 +00003216int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
Ben Widawsky2911a352012-04-05 14:47:36 -07003217int i915_gem_object_sync(struct drm_i915_gem_object *obj,
Chris Wilson8e637172016-08-02 22:50:26 +01003218 struct drm_i915_gem_request *to);
Ben Widawskye2d05a82013-09-24 09:57:58 -07003219void i915_vma_move_to_active(struct i915_vma *vma,
Chris Wilson5cf3d282016-08-04 07:52:43 +01003220 struct drm_i915_gem_request *req,
3221 unsigned int flags);
Dave Airlieff72145b2011-02-07 12:16:14 +10003222int i915_gem_dumb_create(struct drm_file *file_priv,
3223 struct drm_device *dev,
3224 struct drm_mode_create_dumb *args);
Dave Airlieda6b51d2014-12-24 13:11:17 +10003225int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3226 uint32_t handle, uint64_t *offset);
Chris Wilson4cc69072016-08-25 19:05:19 +01003227int i915_gem_mmap_gtt_version(void);
Dave Gordon85d12252016-05-20 11:54:06 +01003228
3229void i915_gem_track_fb(struct drm_i915_gem_object *old,
3230 struct drm_i915_gem_object *new,
3231 unsigned frontbuffer_bits);
3232
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02003233int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
Chris Wilson1690e1e2011-12-14 13:57:08 +01003234
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02003235struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003236i915_gem_find_active_request(struct intel_engine_cs *engine);
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02003237
Chris Wilson67d97da2016-07-04 08:08:31 +01003238void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
Sourab Gupta84c33a62014-06-02 16:47:17 +05303239
Chris Wilsonc19ae982016-04-13 17:35:03 +01003240static inline u32 i915_reset_counter(struct i915_gpu_error *error)
3241{
3242 return atomic_read(&error->reset_counter);
3243}
3244
3245static inline bool __i915_reset_in_progress(u32 reset)
3246{
3247 return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
3248}
3249
3250static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
3251{
3252 return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
3253}
3254
3255static inline bool __i915_terminally_wedged(u32 reset)
3256{
3257 return unlikely(reset & I915_WEDGED);
3258}
3259
Daniel Vetter1f83fee2012-11-15 17:17:22 +01003260static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
3261{
Chris Wilsonc19ae982016-04-13 17:35:03 +01003262 return __i915_reset_in_progress(i915_reset_counter(error));
3263}
3264
3265static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
3266{
3267 return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
Daniel Vetter1f83fee2012-11-15 17:17:22 +01003268}
3269
3270static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
3271{
Chris Wilsonc19ae982016-04-13 17:35:03 +01003272 return __i915_terminally_wedged(i915_reset_counter(error));
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02003273}
3274
3275static inline u32 i915_reset_count(struct i915_gpu_error *error)
3276{
Chris Wilsonc19ae982016-04-13 17:35:03 +01003277 return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
Daniel Vetter1f83fee2012-11-15 17:17:22 +01003278}
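/*
 * Illustrative pattern (a sketch): sample the counter before starting
 * work, then detect whether a reset fired in the meantime.
 *
 *	u32 reset = i915_reset_counter(&dev_priv->gpu_error);
 *	... wait for the GPU ...
 *	if (reset != i915_reset_counter(&dev_priv->gpu_error))
 *		return -EAGAIN;
 */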
Chris Wilsona71d8d92012-02-15 11:25:36 +00003279
Chris Wilson069efc12010-09-30 16:53:18 +01003280void i915_gem_reset(struct drm_device *dev);
Chris Wilson000433b2013-08-08 14:41:09 +01003281bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
Chris Wilson1070a422012-04-24 15:47:41 +01003282int __must_check i915_gem_init(struct drm_device *dev);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003283int __must_check i915_gem_init_hw(struct drm_device *dev);
3284void i915_gem_init_swizzling(struct drm_device *dev);
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00003285void i915_gem_cleanup_engines(struct drm_device *dev);
Chris Wilsondcff85c2016-08-05 10:14:11 +01003286int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
3287 bool interruptible);
Chris Wilson45c5f202013-10-16 11:50:01 +01003288int __must_check i915_gem_suspend(struct drm_device *dev);
Chris Wilson5ab57c72016-07-15 14:56:20 +01003289void i915_gem_resume(struct drm_device *dev);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003290int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
Chris Wilson20217462010-11-23 15:26:33 +00003291int __must_check
Chris Wilson2e2f3512015-04-27 13:41:14 +01003292i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
3293 bool readonly);
3294int __must_check
Chris Wilson20217462010-11-23 15:26:33 +00003295i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
3296 bool write);
3297int __must_check
Chris Wilsondabdfe02012-03-26 10:10:27 +02003298i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
Chris Wilson058d88c2016-08-15 10:49:06 +01003299struct i915_vma * __must_check
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003300i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3301 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003302 const struct i915_ggtt_view *view);
Chris Wilson058d88c2016-08-15 10:49:06 +01003303void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
Chris Wilson00731152014-05-21 12:42:56 +01003304int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003305 int align);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003306int i915_gem_open(struct drm_device *dev, struct drm_file *file);
Chris Wilson05394f32010-11-08 19:18:58 +00003307void i915_gem_release(struct drm_device *dev, struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07003308
Chris Wilsona9f14812016-08-04 16:32:28 +01003309u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
3310 int tiling_mode);
3311u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
Chris Wilsonad1a7d22016-08-04 16:32:27 +01003312 int tiling_mode, bool fenced);
Chris Wilson467cffb2011-03-07 10:42:03 +00003313
Chris Wilsone4ffd172011-04-04 09:44:39 +01003314int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3315 enum i915_cache_level cache_level);
3316
Daniel Vetter1286ff72012-05-10 15:25:09 +02003317struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
3318 struct dma_buf *dma_buf);
3319
3320struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
3321 struct drm_gem_object *gem_obj, int flags);
3322
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003323struct i915_vma *
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003324i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Chris Wilson058d88c2016-08-15 10:49:06 +01003325 struct i915_address_space *vm,
3326 const struct i915_ggtt_view *view);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003327
Ben Widawskyaccfef22013-08-14 11:38:35 +02003328struct i915_vma *
3329i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
Chris Wilson058d88c2016-08-15 10:49:06 +01003330 struct i915_address_space *vm,
3331 const struct i915_ggtt_view *view);
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003332
Daniel Vetter841cd772014-08-06 15:04:48 +02003333static inline struct i915_hw_ppgtt *
3334i915_vm_to_ppgtt(struct i915_address_space *vm)
3335{
Daniel Vetter841cd772014-08-06 15:04:48 +02003336 return container_of(vm, struct i915_hw_ppgtt, base);
3337}
3338
Chris Wilson058d88c2016-08-15 10:49:06 +01003339static inline struct i915_vma *
3340i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
3341 const struct i915_ggtt_view *view)
Ben Widawskya70a3142013-07-31 16:59:56 -07003342{
Chris Wilson058d88c2016-08-15 10:49:06 +01003343 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
Ben Widawskya70a3142013-07-31 16:59:56 -07003344}
3345
Chris Wilson058d88c2016-08-15 10:49:06 +01003346static inline unsigned long
3347i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
3348 const struct i915_ggtt_view *view)
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003349{
Chris Wilsonbde13eb2016-08-15 10:49:07 +01003350 return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003351}
Daniel Vetterb2871102014-02-14 14:01:19 +01003352
Daniel Vetter41a36b72015-07-24 13:55:11 +02003353/* i915_gem_fence.c */
Chris Wilson49ef5292016-08-18 17:17:00 +01003354int __must_check i915_vma_get_fence(struct i915_vma *vma);
3355int __must_check i915_vma_put_fence(struct i915_vma *vma);
Daniel Vetter41a36b72015-07-24 13:55:11 +02003356
Chris Wilson49ef5292016-08-18 17:17:00 +01003357/**
3358 * i915_vma_pin_fence - pin fencing state
3359 * @vma: vma to pin fencing for
3360 *
3361 * This pins the fencing state (whether tiled or untiled) to make sure the
3362 * vma (and its object) is ready to be used as a scanout target. Fencing
3363 * status must be synchronized first by calling i915_vma_get_fence():
3364 *
3365 * The resulting fence pin reference must be released again with
3366 * i915_vma_unpin_fence().
3367 *
3368 * Returns:
3369 *
3370 * True if the vma has a fence, false otherwise.
3371 */
3372static inline bool
3373i915_vma_pin_fence(struct i915_vma *vma)
3374{
3375 if (vma->fence) {
3376 vma->fence->pin_count++;
3377 return true;
3378	}

3379	return false;
3380}
3381
3382/**
3383 * i915_vma_unpin_fence - unpin fencing state
3384 * @vma: vma to unpin fencing for
3385 *
3386 * This releases the fence pin reference acquired through
3387 * i915_vma_pin_fence. It will handle both objects with and without an
3388 * attached fence correctly; callers do not need to distinguish this.
3389 */
3390static inline void
3391i915_vma_unpin_fence(struct i915_vma *vma)
3392{
3393 if (vma->fence) {
3394 GEM_BUG_ON(vma->fence->pin_count <= 0);
3395 vma->fence->pin_count--;
3396 }
3397}
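/*
 * Illustrative pairing (a sketch of a scanout setup path):
 *
 *	ret = i915_vma_get_fence(vma);
 *	if (ret)
 *		return ret;
 *	if (i915_vma_pin_fence(vma)) {
 *		... the fence register stays valid for the lifetime
 *		    of the pin ...
 *		i915_vma_unpin_fence(vma);
 *	}
 */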
Daniel Vetter41a36b72015-07-24 13:55:11 +02003398
3399void i915_gem_restore_fences(struct drm_device *dev);
3400
Daniel Vetter7f96eca2015-07-24 17:40:14 +02003401void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
3402void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
3403void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
3404
Ben Widawsky254f9652012-06-04 14:42:42 -07003405/* i915_gem_context.c */
Ben Widawsky8245be32013-11-06 13:56:29 -02003406int __must_check i915_gem_context_init(struct drm_device *dev);
Chris Wilsonb2e862d2016-04-28 09:56:41 +01003407void i915_gem_context_lost(struct drm_i915_private *dev_priv);
Ben Widawsky254f9652012-06-04 14:42:42 -07003408void i915_gem_context_fini(struct drm_device *dev);
Ben Widawskyacce9ff2013-12-06 14:11:03 -08003409void i915_gem_context_reset(struct drm_device *dev);
Ben Widawskye422b882013-12-06 14:10:58 -08003410int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
Ben Widawsky254f9652012-06-04 14:42:42 -07003411void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
John Harrisonba01cc92015-05-29 17:43:41 +01003412int i915_switch_context(struct drm_i915_gem_request *req);
Chris Wilson945657b2016-07-15 14:56:19 +01003413int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
Mika Kuoppaladce32712013-04-30 13:30:33 +03003414void i915_gem_context_free(struct kref *ctx_ref);
Oscar Mateo8c8579172014-07-24 17:04:14 +01003415struct drm_i915_gem_object *
3416i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
Zhi Wangc8c35792016-06-16 08:07:05 -04003417struct i915_gem_context *
3418i915_gem_context_create_gvt(struct drm_device *dev);
Chris Wilsonca585b52016-05-24 14:53:36 +01003419
3420static inline struct i915_gem_context *
3421i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
3422{
3423 struct i915_gem_context *ctx;
3424
Chris Wilson091387c2016-06-24 14:00:21 +01003425 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
Chris Wilsonca585b52016-05-24 14:53:36 +01003426
3427 ctx = idr_find(&file_priv->context_idr, id);
3428 if (!ctx)
3429 return ERR_PTR(-ENOENT);
3430
3431 return ctx;
3432}
3433
Chris Wilson9a6feaf2016-07-20 13:31:50 +01003434static inline struct i915_gem_context *
3435i915_gem_context_get(struct i915_gem_context *ctx)
Mika Kuoppaladce32712013-04-30 13:30:33 +03003436{
Chris Wilson691e6412014-04-09 09:07:36 +01003437 kref_get(&ctx->ref);
Chris Wilson9a6feaf2016-07-20 13:31:50 +01003438 return ctx;
Mika Kuoppaladce32712013-04-30 13:30:33 +03003439}
3440
Chris Wilson9a6feaf2016-07-20 13:31:50 +01003441static inline void i915_gem_context_put(struct i915_gem_context *ctx)
Mika Kuoppaladce32712013-04-30 13:30:33 +03003442{
Chris Wilson091387c2016-06-24 14:00:21 +01003443 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
Chris Wilson691e6412014-04-09 09:07:36 +01003444 kref_put(&ctx->ref, i915_gem_context_free);
Mika Kuoppaladce32712013-04-30 13:30:33 +03003445}
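/*
 * Illustrative lifetime pattern (struct_mutex held throughout, per the
 * lockdep assertions above):
 *
 *	ctx = i915_gem_context_lookup(file_priv, id);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	ctx = i915_gem_context_get(ctx);
 *	... use ctx ...
 *	i915_gem_context_put(ctx);
 */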

static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned cache_level,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
	wmb();
	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start,
					 u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
			      unsigned long target,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
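
/*
 * Illustrative sketch (not driver code): the I915_SHRINK_* values form a
 * bitmask, so callers combine them, e.g. to reclaim from both the bound
 * and unbound object lists in one pass:
 *
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(dev_priv, target,
 *				I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 */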

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
extern void intel_opregion_register(struct drm_i915_private *dev_priv);
extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
					 pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
static inline int intel_opregion_setup(struct drm_i915_private *dev_priv) { return 0; }
static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state)
{
	return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using two 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
	u32 upper, lower, old_upper, loop = 0; \
	upper = I915_READ(upper_reg); \
	do { \
		old_upper = upper; \
		lower = I915_READ(lower_reg); \
		upper = I915_READ(upper_reg); \
	} while (upper != old_upper && loop++ < 2); \
	(u64)upper << 32 | lower; })
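
/*
 * Illustrative sketch (not driver code): reading a 64-bit counter exposed
 * as a low/high dword pair.  The register names below are hypothetical;
 * the retry loop in I915_READ64_2x32() copes with the high dword changing
 * (e.g. the low dword overflowing) between the two reads:
 *
 *	u64 ts = I915_READ64_2x32(EXAMPLE_TS_LOW, EXAMPLE_TS_UDW);
 */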

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

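/*
 * For reference, __raw_read(32, l) above expands to
 *
 *	static inline uint32_t __raw_i915_read32(struct drm_i915_private *dev_priv,
 *						 i915_reg_t reg)
 *	{
 *		return readl(dev_priv->regs + i915_mmio_reg_offset(reg));
 *	}
 *
 * and likewise for the 8/16/64-bit variants via readb/readw/readq and the
 * matching write helpers.
 */
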
/* These are untraced mmio accessors that are only valid for use inside
 * critical sections in IRQ handlers where forcewake is explicitly
 * controlled.
 * Think twice, and think again, before using these.
 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
 * intel_uncore_forcewake_irqunlock().
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
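
/*
 * Illustrative sketch (not driver code), assuming the __locked forcewake
 * helpers and dev_priv->uncore.lock declared elsewhere in this file: the
 * rough shape of an _FW access sequence with forcewake held explicitly
 * (the register name is hypothetical):
 *
 *	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *
 *	I915_WRITE_FW(EXAMPLE_REG, val);
 *	POSTING_READ_FW(EXAMPLE_REG);
 *
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 */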

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
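
/*
 * Worked example (for reference only): a timeout of n jiffies is only
 * guaranteed to sleep for at least n - 1 full ticks, so with HZ=100 a
 * plain msecs_to_jiffies(5) == 1 could expire almost immediately.  The
 * "+ 1" above restores the guarantee of waiting at least the requested
 * time, e.g.:
 *
 *	ret = wait_event_timeout(wq, condition, msecs_to_jiffies_timeout(5));
 */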

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen immediately after event A, record the timestamp (jiffies)
 * when event A happened, then just before event B call this function with
 * that timestamp as the first argument and X as the second.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
				schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
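
/*
 * Illustrative sketch (not driver code): enforcing a minimum power-cycle
 * delay between switching a panel off and back on.  The variable name and
 * the 500ms figure are hypothetical:
 *
 *	unsigned long panel_off_jiffies;
 *
 *	panel_off_jiffies = jiffies;	(event A: panel switched off)
 *	...
 *	wait_remaining_ms_from_jiffies(panel_off_jiffies, 500);
 *	(event B: safe to switch the panel back on)
 */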

static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (i915_gem_request_completed(req))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
		struct task_struct *tsk;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk && tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whomever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourselves.
			 */
			wake_up_process(tsk);
		rcu_read_unlock();

		if (i915_gem_request_completed(req))
			return true;
	}

	/* We need to check whether any gpu reset happened in between
	 * the request being submitted and now. If a reset has occurred,
	 * the seqno will have been advanced past ours and our request
	 * is complete. If we are in the process of handling a reset,
	 * the request is effectively complete as the rendering will
	 * be discarded, but we need to return in order to drop the
	 * struct_mutex.
	 */
	if (i915_reset_in_progress(&req->i915->gpu_error))
		return true;

	return false;
}
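
/*
 * Illustrative sketch (not driver code): the shape of a waiter loop built
 * around the helper above.  The real wait-request loop elsewhere in the
 * driver is more involved (signal handling, timeouts, bottom-half
 * bookkeeping); this only shows where the helper fits:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (__i915_request_irq_complete(req))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */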

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

#define ptr_mask_bits(ptr) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & PAGE_MASK); \
})

#define ptr_unpack_bits(ptr, bits) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(bits) = __v & ~PAGE_MASK; \
	(typeof(ptr))(__v & PAGE_MASK); \
})

#define ptr_pack_bits(ptr, bits) \
	((typeof(ptr))((unsigned long)(ptr) | (bits)))
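
/*
 * Illustrative sketch (not driver code): stashing a small flag in the low
 * (sub-page) bits of a page-aligned pointer and recovering both parts.
 * This only works if the pointer really is page aligned:
 *
 *	void *packed = ptr_pack_bits(page, 0x2);
 *	unsigned long bits;
 *	void *page2 = ptr_unpack_bits(packed, bits);
 *
 * after which page2 == page and bits == 0x2, while ptr_mask_bits(packed)
 * returns the pointer and simply discards the low bits.
 */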

#define fetch_and_zero(ptr) ({ \
	typeof(*(ptr)) __T = *(ptr); \
	*(ptr) = (typeof(*(ptr)))0; \
	__T; \
})
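
/*
 * Illustrative sketch (not driver code): fetch_and_zero() reads out a slot
 * and clears it in a single expression.  Note it is not atomic with respect
 * to concurrent writers; the slot name below is hypothetical:
 *
 *	struct drm_i915_gem_request *rq = fetch_and_zero(&slot);
 *	if (rq)
 *		i915_gem_request_put(rq);
 */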

#endif /* _I915_DRV_H_ */