/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_uc.h"

#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20171222"
#define DRIVER_TIMESTAMP	1513971710

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		if (!WARN(i915_modparams.verbose_state_checks, format)) \
			DRM_ERROR(format); \
	unlikely(__ret_warn_on); \
})

#define I915_STATE_WARN_ON(x) \
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
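
/*
 * Illustrative usage (a sketch, not part of the original header): state
 * checkers typically use the macro's return value to bail out early, e.g.
 *
 *	if (I915_STATE_WARN_ON(!enabled))
 *		return;
 *
 *	I915_STATE_WARN(cur_state != expected,
 *			"pipe state mismatch (expected %i, found %i)\n",
 *			expected, cur_state);
 *
 * The variable names above are hypothetical; the macro evaluates to the
 * (unlikely-annotated) condition, so it can sit directly inside an if ().
 */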

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)
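
/*
 * Illustrative call site (hypothetical): driver load paths check this at
 * interesting points so that CI can simulate probe failures through the
 * i915.inject_load_failure module parameter, along the lines of
 *
 *	if (i915_inject_load_failure())
 *		return -ENODEV;
 */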

typedef struct {
	uint32_t val;
} uint_fixed_16_16_t;

#define FP_16_16_MAX ({ \
	uint_fixed_16_16_t fp; \
	fp.val = UINT_MAX; \
	fp; \
})

static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	if (val.val == 0)
		return true;
	return false;
}

static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U16_MAX);

	fp.val = val << 16;
	return fp;
}

static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}

static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
	return fp.val >> 16;
}

static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
					     uint_fixed_16_16_t min2)
{
	uint_fixed_16_16_t min;

	min.val = min(min1.val, min2.val);
	return min;
}

static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	uint_fixed_16_16_t max;

	max.val = max(max1.val, max2.val);
	return max;
}

static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U32_MAX);
	fp.val = (uint32_t) val;
	return fp;
}

static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
					    uint_fixed_16_16_t d)
{
	return DIV_ROUND_UP(val.val, d.val);
}

static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
	WARN_ON(intermediate_val > U32_MAX);
	return (uint32_t) intermediate_val;
}

static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val.val * mul.val;
	intermediate_val = intermediate_val >> 16;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
	return clamp_u64_to_fixed16(interm_val);
}

static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
	WARN_ON(interm_val > U32_MAX);
	return (uint32_t) interm_val;
}

static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
						 uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	uint64_t interm_sum;

	interm_sum = (uint64_t) add1.val + add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}

static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 uint32_t add2)
{
	uint64_t interm_sum;
	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);

	interm_sum = (uint64_t) add1.val + interm_add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}
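
/*
 * Worked example (illustration only, not part of the original header):
 * uint_fixed_16_16_t holds a Q16.16 value, i.e. raw = real * 2^16, so
 * 1.5 is represented as 0x00018000:
 *
 *	uint_fixed_16_16_t a = u32_to_fixed16(3);	// a.val == 0x00030000
 *	uint_fixed_16_16_t b = div_fixed16(1, 2);	// b.val == 0x00008000 (0.5)
 *	uint_fixed_16_16_t c = mul_fixed16(a, b);	// c.val == 0x00018000 (1.5)
 *	uint32_t up   = fixed16_to_u32_round_up(c);	// == 2
 *	uint32_t down = fixed16_to_u32(c);		// == 1
 *
 * The round-up/truncate split lets callers (e.g. the watermark math) pick
 * the conservative direction for each computation.
 */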

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
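
/*
 * Illustrative usage (hypothetical loop body): the macro simply walks the
 * real pins, (HPD_NONE + 1) .. (HPD_NUM_PINS - 1), skipping HPD_NONE:
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].count = 0;
 */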

#define HPD_STORM_DEFAULT_THRESHOLD 5

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD could block the workqueue on a mode config mutex
	 * that userspace may have taken, while userspace is waiting on
	 * the DP workqueue to run, which is blocked behind the non-DP
	 * one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		atomic_t boosts;
	} rps_client;

	unsigned int bsd_engine;

/* A client can have a maximum of 3 contexts banned before it is denied
 * further context creation. As one context ban needs 4 consecutive hangs,
 * and more if there is progress in between, this is a last-resort stopgap
 * measure to limit a badly behaving client's access to the gpu.
 */
#define I915_MAX_CLIENT_CONTEXT_BANS 3
	atomic_t context_bans;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
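
/*
 * Example (arithmetic only; the version numbers are made up):
 * CSR_VERSION(1, 27) packs to 0x0001001b, from which CSR_VERSION_MAJOR()
 * recovers 1 and CSR_VERSION_MINOR() recovers 27.
 */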

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

struct intel_display_error_state;

struct i915_gpu_state {
	struct kref ref;
	ktime_t time;
	ktime_t boottime;
	ktime_t uptime;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct i915_params params;

	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		bool waiting;
		int num_waiters;
		unsigned long hangcheck_timestamp;
		bool hangcheck_stalled;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 handle;
			u32 hw_id;
			int priority;
			int ban_score;
			int active;
			int guilty;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			int priority;
			int ban_score;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct i915_vma *vma;

		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			int y;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct i915_vma *vma;

		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;

		int cfb_size;
		unsigned int gen9_wa_cfb_stride;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID.
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
	bool y_cord_support;
	bool colorimetry_support;
	bool alpm;

	void (*enable_source)(struct intel_dp *,
			      const struct intel_crtc_state *);
	void (*disable_source)(struct intel_dp *,
			       const struct intel_crtc_state *);
	void (*enable_sink)(struct intel_dp *);
	void (*activate)(struct intel_dp *);
	void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kaby Lake PCH */
	PCH_CNP,	/* Cannon Lake PCH */
	PCH_ICP,	/* Ice Lake PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	ktime_t ktime;
	u32 render_c0;
	u32 media_c0;
};

struct intel_rps {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intrmsk_mbz;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold; /* Current %busy required to upclock */
	u8 down_threshold; /* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	atomic_t num_waiters;
	atomic_t boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei ei;
};

struct intel_rc6 {
	bool enabled;
};

struct intel_llc_pstate {
	bool enabled;
};

struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct intel_rc6 rc6;
	struct intel_llc_pstate llc_pstate;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};
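
/*
 * A minimal sketch of how these hooks fit together (hypothetical; the real
 * per-platform implementations live elsewhere in the driver, e.g.
 * intel_runtime_pm.c, and are register-driven):
 *
 *	static void example_power_well_enable(struct drm_i915_private *dev_priv,
 *					      struct i915_power_well *power_well)
 *	{
 *		// set the well's request bit, then poll its state bit
 *		// until the hardware reports the well as powered up
 *	}
 *
 *	static const struct i915_power_well_ops example_power_well_ops = {
 *		.sync_hw    = example_power_well_sync_hw,
 *		.enable     = example_power_well_enable,
 *		.disable    = example_power_well_disable,
 *		.is_enabled = example_power_well_enabled,
 *	};
 *
 * enable() runs on the 0->1 refcount transition and disable() on 1->0, as
 * documented above.
 */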

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	u64 domains;
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	union {
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			/* Mask of pipes whose IRQ logic is backed by the pw */
			u8 irq_pipe_mask;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU). These objects may or may
	 * not actually have any pages attached.
	 */
	struct list_head unbound_list;

	/** List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	spinlock_t free_lock;

	/**
	 * Small stash of WC pages
	 */
	struct pagevec wc_stash;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	u64 unordered_timeline;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count.
	 *
	 * This is a counter which gets incremented when reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
	 * other users acquiring the struct_mutex. To do this we set the
	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
	 * and then check for that bit before acquiring the struct_mutex (in
	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
	 * secondary role in preventing two concurrent global reset attempts.
	 *
	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
	 * but it may be held by some long running waiter (that we cannot
	 * interrupt without causing trouble). Once we are ready to do the GPU
	 * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
	 * they already hold the struct_mutex and want to participate they can
	 * inspect the bit and do the reset directly, otherwise the worker
	 * waits for the struct_mutex.
	 *
	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
	 * acquire the struct_mutex to reset an engine, we need an explicit
	 * flag to prevent two concurrent reset attempts in the same engine.
	 * As the number of engines continues to grow, allocate the flags from
	 * the most significant bits.
	 *
	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
	 * i915_gem_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_RESET_MODESET	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
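
/*
 * Per-engine reset bits sit just below I915_WEDGED at the top of the flags
 * word, so engine N owns bit I915_RESET_ENGINE + N. A (hypothetical) check
 * for an in-flight engine reset would look like:
 *
 *	if (test_bit(I915_RESET_ENGINE + engine->id, &dev_priv->gpu_error.flags))
 *		return -EBUSY;	// reset already in progress for this engine
 */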
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001228
Michel Thierry702c8f82017-06-20 10:57:48 +01001229 /** Number of times an engine has been reset */
1230 u32 reset_engine_count[I915_NUM_ENGINES];
1231
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001232 /**
Chris Wilson1f15b762016-07-01 17:23:14 +01001233 * Waitqueue to signal when a hang is detected. Used for waiters
1234 * to release the struct_mutex for the reset to proceed.
1235 */
1236 wait_queue_head_t wait_queue;
1237
1238 /**
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001239 * Waitqueue to signal when the reset has completed. Used by clients
1240 * that wait for dev_priv->mm.wedged to settle.
1241 */
1242 wait_queue_head_t reset_queue;
Daniel Vetter33196de2012-11-14 17:14:05 +01001243
Chris Wilson094f9a52013-09-25 17:34:55 +01001244 /* For missed irq/seqno simulation. */
Chris Wilson688e6c72016-07-01 17:23:15 +01001245 unsigned long test_irq_rings;
Daniel Vetter99584db2012-11-14 17:14:04 +01001246};
1247
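/*
 * Illustrative sketch (not part of the driver): how a lock-free waiter is
 * expected to consume the flags documented above. The helper name
 * example_reset_pending() is hypothetical.
 */
static inline bool example_reset_pending(struct i915_gpu_error *error)
{
	/* A global reset has been requested; struct_mutex users back off. */
	if (test_bit(I915_RESET_BACKOFF, &error->flags))
		return true;

	/* Terminal failure; new submissions are aborted with -EIO. */
	return test_bit(I915_WEDGED, &error->flags);
}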
Zhang Ruib8efb172013-02-05 15:41:53 +08001248enum modeset_restore {
1249 MODESET_ON_LID_OPEN,
1250 MODESET_DONE,
1251 MODESET_SUSPENDED,
1252};
1253
Rodrigo Vivi500ea702015-08-07 17:01:16 -07001254#define DP_AUX_A 0x40
1255#define DP_AUX_B 0x10
1256#define DP_AUX_C 0x20
1257#define DP_AUX_D 0x30
Rodrigo Vivia324fca2018-01-29 15:22:15 -08001258#define DP_AUX_F 0x60
Rodrigo Vivi500ea702015-08-07 17:01:16 -07001259
Xiong Zhang11c1b652015-08-17 16:04:04 +08001260#define DDC_PIN_B 0x05
1261#define DDC_PIN_C 0x04
1262#define DDC_PIN_D 0x06
1263
Paulo Zanoni6acab152013-09-12 17:06:24 -03001264struct ddi_vbt_port_info {
Ville Syrjäläd6038612017-10-30 16:57:02 +02001265 int max_tmds_clock;
1266
Damien Lespiauce4dd492014-08-01 11:07:54 +01001267 /*
1268 * This is an index in the HDMI/DVI DDI buffer translation table.
1269 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1270 * populate this field.
1271 */
1272#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
Paulo Zanoni6acab152013-09-12 17:06:24 -03001273 uint8_t hdmi_level_shift;
Paulo Zanoni311a2092013-09-12 17:12:18 -03001274
1275 uint8_t supports_dvi:1;
1276 uint8_t supports_hdmi:1;
1277 uint8_t supports_dp:1;
Imre Deaka98d9c12016-12-21 12:17:24 +02001278 uint8_t supports_edp:1;
Rodrigo Vivi500ea702015-08-07 17:01:16 -07001279
1280 uint8_t alternate_aux_channel;
Xiong Zhang11c1b652015-08-17 16:04:04 +08001281 uint8_t alternate_ddc_pin;
Antti Koskipaa75067dd2015-07-10 14:10:55 +03001282
1283 uint8_t dp_boost_level;
1284 uint8_t hdmi_boost_level;
Paulo Zanoni6acab152013-09-12 17:06:24 -03001285};
1286
Rodrigo Vivibfd7ebd2014-11-14 08:52:30 -08001287enum psr_lines_to_wait {
1288 PSR_0_LINES_TO_WAIT = 0,
1289 PSR_1_LINE_TO_WAIT,
1290 PSR_4_LINES_TO_WAIT,
1291 PSR_8_LINES_TO_WAIT
Pradeep Bhat83a72802014-03-28 10:14:57 +05301292};
1293
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001294struct intel_vbt_data {
1295 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1296 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1297
1298 /* Feature bits */
1299 unsigned int int_tv_support:1;
1300 unsigned int lvds_dither:1;
1301 unsigned int lvds_vbt:1;
1302 unsigned int int_crt_support:1;
1303 unsigned int lvds_use_ssc:1;
1304 unsigned int display_clock_mode:1;
1305 unsigned int fdi_rx_polarity_inverted:1;
Ville Syrjälä3e845c72016-04-08 16:28:12 +03001306 unsigned int panel_type:4;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001307 int lvds_ssc_freq;
1308 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1309
Pradeep Bhat83a72802014-03-28 10:14:57 +05301310 enum drrs_support_type drrs_type;
1311
Jani Nikula6aa23e62016-03-24 17:50:20 +02001312 struct {
1313 int rate;
1314 int lanes;
1315 int preemphasis;
1316 int vswing;
Jani Nikula06411f02016-03-24 17:50:21 +02001317 bool low_vswing;
Jani Nikula6aa23e62016-03-24 17:50:20 +02001318 bool initialized;
1319 bool support;
1320 int bpp;
1321 struct edp_power_seq pps;
1322 } edp;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001323
Jani Nikulaf00076d2013-12-14 20:38:29 -02001324 struct {
Rodrigo Vivibfd7ebd2014-11-14 08:52:30 -08001325 bool full_link;
1326 bool require_aux_wakeup;
1327 int idle_frames;
1328 enum psr_lines_to_wait lines_to_wait;
1329 int tp1_wakeup_time;
1330 int tp2_tp3_wakeup_time;
1331 } psr;
1332
1333 struct {
Jani Nikulaf00076d2013-12-14 20:38:29 -02001334 u16 pwm_freq_hz;
Jani Nikula39fbc9c2014-04-09 11:22:06 +03001335 bool present;
Jani Nikulaf00076d2013-12-14 20:38:29 -02001336 bool active_low_pwm;
Jani Nikula1de60682014-06-24 18:27:39 +03001337 u8 min_brightness; /* min_brightness/255 of max */
Vidya Srinivasadd03372016-12-08 11:26:18 +02001338 u8 controller; /* brightness controller number */
Deepak M9a41e172016-04-26 16:14:24 +03001339 enum intel_backlight_type type;
Jani Nikulaf00076d2013-12-14 20:38:29 -02001340 } backlight;
1341
Shobhit Kumard17c5442013-08-27 15:12:25 +03001342 /* MIPI DSI */
1343 struct {
1344 u16 panel_id;
Shobhit Kumard3b542f2014-04-14 11:00:34 +05301345 struct mipi_config *config;
1346 struct mipi_pps_data *pps;
Madhav Chauhan46e58322017-10-13 18:14:59 +05301347 u16 bl_ports;
1348 u16 cabc_ports;
Shobhit Kumard3b542f2014-04-14 11:00:34 +05301349 u8 seq_version;
1350 u32 size;
1351 u8 *data;
Jani Nikula8d3ed2f2015-12-21 15:10:57 +02001352 const u8 *sequence[MIPI_SEQ_MAX];
Shobhit Kumard17c5442013-08-27 15:12:25 +03001353 } dsi;
1354
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001355 int crt_ddc_pin;
1356
1357 int child_dev_num;
Jani Nikulacc998582017-08-24 21:54:03 +03001358 struct child_device_config *child_dev;
Paulo Zanoni6acab152013-09-12 17:06:24 -03001359
1360 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
Jani Nikula9d6c8752016-03-24 17:50:22 +02001361 struct sdvo_device_mapping sdvo_mappings[2];
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001362};
1363
Ville Syrjälä77c122b2013-08-06 22:24:04 +03001364enum intel_ddb_partitioning {
1365 INTEL_DDB_PART_1_2,
1366 INTEL_DDB_PART_5_6, /* IVB+ */
1367};
1368
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03001369struct intel_wm_level {
1370 bool enable;
1371 uint32_t pri_val;
1372 uint32_t spr_val;
1373 uint32_t cur_val;
1374 uint32_t fbc_val;
1375};
1376
Imre Deak820c1982013-12-17 14:46:36 +02001377struct ilk_wm_values {
Ville Syrjälä609cede2013-10-09 19:18:03 +03001378 uint32_t wm_pipe[3];
1379 uint32_t wm_lp[3];
1380 uint32_t wm_lp_spr[3];
1381 uint32_t wm_linetime[3];
1382 bool enable_fbc_wm;
1383 enum intel_ddb_partitioning partitioning;
1384};
1385
Ville Syrjälä114d7dc2017-04-21 21:14:21 +03001386struct g4x_pipe_wm {
Ville Syrjälä1b313892016-11-28 19:37:08 +02001387 uint16_t plane[I915_MAX_PLANES];
Ville Syrjälä04548cb2017-04-21 21:14:29 +03001388 uint16_t fbc;
Ville Syrjälä262cd2e2015-06-24 22:00:04 +03001389};
1390
Ville Syrjälä114d7dc2017-04-21 21:14:21 +03001391struct g4x_sr_wm {
Ville Syrjälä262cd2e2015-06-24 22:00:04 +03001392 uint16_t plane;
Ville Syrjälä1b313892016-11-28 19:37:08 +02001393 uint16_t cursor;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03001394 uint16_t fbc;
Ville Syrjälä1b313892016-11-28 19:37:08 +02001395};
1396
1397struct vlv_wm_ddl_values {
1398 uint8_t plane[I915_MAX_PLANES];
Ville Syrjälä262cd2e2015-06-24 22:00:04 +03001399};
1400
Ville Syrjälä0018fda2015-03-05 21:19:45 +02001401struct vlv_wm_values {
Ville Syrjälä114d7dc2017-04-21 21:14:21 +03001402 struct g4x_pipe_wm pipe[3];
1403 struct g4x_sr_wm sr;
Ville Syrjälä1b313892016-11-28 19:37:08 +02001404 struct vlv_wm_ddl_values ddl[3];
Ville Syrjälä6eb1a682015-06-24 22:00:03 +03001405 uint8_t level;
1406 bool cxsr;
Ville Syrjälä0018fda2015-03-05 21:19:45 +02001407};
1408
Ville Syrjälä04548cb2017-04-21 21:14:29 +03001409struct g4x_wm_values {
1410 struct g4x_pipe_wm pipe[2];
1411 struct g4x_sr_wm sr;
1412 struct g4x_sr_wm hpll;
1413 bool cxsr;
1414 bool hpll_en;
1415 bool fbc_en;
1416};
1417
Damien Lespiauc1939242014-11-04 17:06:41 +00001418struct skl_ddb_entry {
Damien Lespiau16160e32014-11-04 17:06:53 +00001419 uint16_t start, end; /* in number of blocks, 'end' is exclusive */
Damien Lespiauc1939242014-11-04 17:06:41 +00001420};
1421
1422static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1423{
Damien Lespiau16160e32014-11-04 17:06:53 +00001424 return entry->end - entry->start;
Damien Lespiauc1939242014-11-04 17:06:41 +00001425}
1426
Damien Lespiau08db6652014-11-04 17:06:52 +00001427static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1428 const struct skl_ddb_entry *e2)
1429{
1430 if (e1->start == e2->start && e1->end == e2->end)
1431 return true;
1432
1433 return false;
1434}
1435
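/*
 * Illustrative helper (an assumption, not in the original header): with
 * 'end' exclusive, two DDB entries overlap iff each starts before the
 * other ends; entries that merely touch do not overlap.
 */
static inline bool example_skl_ddb_entries_overlap(const struct skl_ddb_entry *e1,
						   const struct skl_ddb_entry *e2)
{
	return e1->start < e2->end && e2->start < e1->end;
}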
Damien Lespiauc1939242014-11-04 17:06:41 +00001436struct skl_ddb_allocation {
Chandra Konduru2cd601c2015-04-27 15:47:37 -07001437 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
Matt Roper4969d332015-09-24 15:53:10 -07001438 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
Damien Lespiauc1939242014-11-04 17:06:41 +00001439};
1440
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001441struct skl_wm_values {
Matt Roper2b4b9f32016-05-12 07:06:07 -07001442 unsigned dirty_pipes;
Damien Lespiauc1939242014-11-04 17:06:41 +00001443 struct skl_ddb_allocation ddb;
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001444};
1445
1446struct skl_wm_level {
Lyudea62163e2016-10-04 14:28:20 -04001447 bool plane_en;
1448 uint16_t plane_res_b;
1449 uint8_t plane_res_l;
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001450};
1451
Kumar, Mahesh7e452fd2017-08-17 19:15:23 +05301452/* Stores plane specific WM parameters */
1453struct skl_wm_params {
1454 bool x_tiled, y_tiled;
1455 bool rc_surface;
1456 uint32_t width;
1457 uint8_t cpp;
1458 uint32_t plane_pixel_rate;
1459 uint32_t y_min_scanlines;
1460 uint32_t plane_bytes_per_line;
1461 uint_fixed_16_16_t plane_blocks_per_line;
1462 uint_fixed_16_16_t y_tile_minimum;
1463 uint32_t linetime_us;
1464};
1465
Paulo Zanonic67a4702013-08-19 13:18:09 -03001466/*
Paulo Zanoni765dab672014-03-07 20:08:18 -03001467 * This struct helps tracking the state needed for runtime PM, which puts the
1468 * device in PCI D3 state. Notice that when this happens, nothing on the
1469 * graphics device works, even register access, so we don't get interrupts or
1470 * anything else.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001471 *
Paulo Zanoni765dab672014-03-07 20:08:18 -03001472 * Every piece of our code that needs to actually touch the hardware needs to
1473 * either call intel_runtime_pm_get or call intel_display_power_get with the
1474 * appropriate power domain.
Paulo Zanonia8a8bd52014-03-07 20:08:05 -03001475 *
Paulo Zanoni765dab672014-03-07 20:08:18 -03001476 * Our driver uses the autosuspend delay feature, which means we'll only really
1477 * suspend if we stay with zero refcount for a certain amount of time. The
Daniel Vetterf458ebb2014-09-30 10:56:39 +02001478 * default value is currently very conservative (see intel_runtime_pm_enable), but
Paulo Zanoni765dab672014-03-07 20:08:18 -03001479 * it can be changed with the standard runtime PM files from sysfs.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001480 *
1481 * The irqs_enabled variable becomes false exactly after we disable the IRQs and
1482 * goes back to true exactly before we reenable the IRQs. We use this variable
1483 * to check if someone is trying to enable/disable IRQs while they're supposed
1484 * to be disabled. This shouldn't happen and we'll print some error messages in
Paulo Zanoni730488b2014-03-07 20:12:32 -03001485 * case it happens.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001486 *
Paulo Zanoni765dab672014-03-07 20:08:18 -03001487 * For more, read Documentation/power/runtime_pm.txt.
Paulo Zanonic67a4702013-08-19 13:18:09 -03001488 */
Paulo Zanoni5d584b22014-03-07 20:08:15 -03001489struct i915_runtime_pm {
Imre Deak1f814da2015-12-16 02:52:19 +02001490 atomic_t wakeref_count;
Paulo Zanoni5d584b22014-03-07 20:08:15 -03001491 bool suspended;
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02001492 bool irqs_enabled;
Paulo Zanonic67a4702013-08-19 13:18:09 -03001493};
1494
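/*
 * Illustrative sketch of the pattern described above: bracket every
 * hardware access with a runtime PM reference so the device cannot enter
 * D3 mid-access. intel_runtime_pm_get/put() and I915_READ() are the real
 * driver APIs; example_read_reg() itself is hypothetical.
 */
static inline u32 example_read_reg(struct drm_i915_private *dev_priv,
				   i915_reg_t reg)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);	/* wakeref held: device stays awake */
	val = I915_READ(reg);
	intel_runtime_pm_put(dev_priv);	/* allow autosuspend again */

	return val;
}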
Daniel Vetter926321d2013-10-16 13:30:34 +02001495enum intel_pipe_crc_source {
1496 INTEL_PIPE_CRC_SOURCE_NONE,
1497 INTEL_PIPE_CRC_SOURCE_PLANE1,
1498 INTEL_PIPE_CRC_SOURCE_PLANE2,
1499 INTEL_PIPE_CRC_SOURCE_PF,
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001500 INTEL_PIPE_CRC_SOURCE_PIPE,
Daniel Vetter3d099a02013-10-16 22:55:58 +02001501 /* TV/DP on pre-gen5/vlv can't use the pipe source. */
1502 INTEL_PIPE_CRC_SOURCE_TV,
1503 INTEL_PIPE_CRC_SOURCE_DP_B,
1504 INTEL_PIPE_CRC_SOURCE_DP_C,
1505 INTEL_PIPE_CRC_SOURCE_DP_D,
Daniel Vetter46a19182013-11-01 10:50:20 +01001506 INTEL_PIPE_CRC_SOURCE_AUTO,
Daniel Vetter926321d2013-10-16 13:30:34 +02001507 INTEL_PIPE_CRC_SOURCE_MAX,
1508};
1509
Shuang He8bf1e9f2013-10-15 18:55:27 +01001510struct intel_pipe_crc_entry {
Damien Lespiauac2300d2013-10-15 18:55:30 +01001511 uint32_t frame;
Shuang He8bf1e9f2013-10-15 18:55:27 +01001512 uint32_t crc[5];
1513};
1514
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001515#define INTEL_PIPE_CRC_ENTRIES_NR 128
Shuang He8bf1e9f2013-10-15 18:55:27 +01001516struct intel_pipe_crc {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001517 spinlock_t lock;
1518 bool opened; /* exclusive access to the result file */
Damien Lespiaue5f75ac2013-10-15 18:55:34 +01001519 struct intel_pipe_crc_entry *entries;
Daniel Vetter926321d2013-10-16 13:30:34 +02001520 enum intel_pipe_crc_source source;
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001521 int head, tail;
Damien Lespiau07144422013-10-15 18:55:40 +01001522 wait_queue_head_t wq;
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +01001523 int skipped;
Shuang He8bf1e9f2013-10-15 18:55:27 +01001524};
1525
Daniel Vetterf99d7062014-06-19 16:01:59 +02001526struct i915_frontbuffer_tracking {
Chris Wilsonb5add952016-08-04 16:32:36 +01001527 spinlock_t lock;
Daniel Vetterf99d7062014-06-19 16:01:59 +02001528
1529 /*
1530 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1531 * scheduled flips.
1532 */
1533 unsigned busy_bits;
1534 unsigned flip_bits;
1535};
1536
Mika Kuoppala72253422014-10-07 17:21:26 +03001537struct i915_wa_reg {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001538 i915_reg_t addr;
Mika Kuoppala72253422014-10-07 17:21:26 +03001539 u32 value;
1540 /* bitmask representing WA bits */
1541 u32 mask;
1542};
1543
Oscar Mateod6242ae2017-10-17 13:27:51 -07001544#define I915_MAX_WA_REGS 16
Mika Kuoppala72253422014-10-07 17:21:26 +03001545
1546struct i915_workarounds {
1547 struct i915_wa_reg reg[I915_MAX_WA_REGS];
1548 u32 count;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001549 u32 hw_whitelist_count[I915_NUM_ENGINES];
Mika Kuoppala72253422014-10-07 17:21:26 +03001550};
1551
Yu Zhangcf9d2892015-02-10 19:05:47 +08001552struct i915_virtual_gpu {
1553 bool active;
Tina Zhang8a4ab662017-08-14 15:20:46 +08001554 u32 caps;
Yu Zhangcf9d2892015-02-10 19:05:47 +08001555};
1556
Matt Roperaa363132015-09-24 15:53:18 -07001557/* used in computing the new watermarks state */
1558struct intel_wm_config {
1559 unsigned int num_pipes_active;
1560 bool sprites_enabled;
1561 bool sprites_scaled;
1562};
1563
Robert Braggd7965152016-11-07 19:49:52 +00001564struct i915_oa_format {
1565 u32 format;
1566 int size;
1567};
1568
Robert Bragg8a3003d2016-11-07 19:49:51 +00001569struct i915_oa_reg {
1570 i915_reg_t addr;
1571 u32 value;
1572};
1573
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001574struct i915_oa_config {
1575 char uuid[UUID_STRING_LEN + 1];
1576 int id;
1577
1578 const struct i915_oa_reg *mux_regs;
1579 u32 mux_regs_len;
1580 const struct i915_oa_reg *b_counter_regs;
1581 u32 b_counter_regs_len;
1582 const struct i915_oa_reg *flex_regs;
1583 u32 flex_regs_len;
1584
1585 struct attribute_group sysfs_metric;
1586 struct attribute *attrs[2];
1587 struct device_attribute sysfs_metric_id;
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01001588
1589 atomic_t ref_count;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001590};
1591
Robert Braggeec688e2016-11-07 19:49:47 +00001592struct i915_perf_stream;
1593
Robert Bragg16d98b32016-12-07 21:40:33 +00001594/**
1595 * struct i915_perf_stream_ops - the OPs to support a specific stream type
1596 */
Robert Braggeec688e2016-11-07 19:49:47 +00001597struct i915_perf_stream_ops {
Robert Bragg16d98b32016-12-07 21:40:33 +00001598 /**
1599 * @enable: Enables the collection of HW samples, either in response to
1600 * `I915_PERF_IOCTL_ENABLE` or implicitly called when a stream is opened
1601 * without `I915_PERF_FLAG_DISABLED`.
Robert Braggeec688e2016-11-07 19:49:47 +00001602 */
1603 void (*enable)(struct i915_perf_stream *stream);
1604
Robert Bragg16d98b32016-12-07 21:40:33 +00001605 /**
1606 * @disable: Disables the collection of HW samples, either in response
1607 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
1608 * the stream.
Robert Braggeec688e2016-11-07 19:49:47 +00001609 */
1610 void (*disable)(struct i915_perf_stream *stream);
1611
Robert Bragg16d98b32016-12-07 21:40:33 +00001612 /**
1613 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
Robert Braggeec688e2016-11-07 19:49:47 +00001614 * once there is something ready to read() for the stream
1615 */
1616 void (*poll_wait)(struct i915_perf_stream *stream,
1617 struct file *file,
1618 poll_table *wait);
1619
Robert Bragg16d98b32016-12-07 21:40:33 +00001620 /**
1621 * @wait_unlocked: For handling a blocking read, wait until there is
1622 * something ready to read() for the stream. E.g. wait on the same
Robert Braggd7965152016-11-07 19:49:52 +00001623 * wait queue that would be passed to poll_wait().
Robert Braggeec688e2016-11-07 19:49:47 +00001624 */
1625 int (*wait_unlocked)(struct i915_perf_stream *stream);
1626
Robert Bragg16d98b32016-12-07 21:40:33 +00001627 /**
1628 * @read: Copy buffered metrics as records to userspace
1629 * **buf**: the userspace destination buffer
1630 * **count**: the number of bytes to copy, requested by userspace
1631 * **offset**: zero at the start of the read, updated as the read
1632 * proceeds, it represents how many bytes have been copied so far and
1633 * the buffer offset for copying the next record.
Robert Braggeec688e2016-11-07 19:49:47 +00001634 *
Robert Bragg16d98b32016-12-07 21:40:33 +00001635 * Copy as many buffered i915 perf samples and records for this stream
1636 * to userspace as will fit in the given buffer.
Robert Braggeec688e2016-11-07 19:49:47 +00001637 *
Robert Bragg16d98b32016-12-07 21:40:33 +00001638 * Only write complete records; returning -%ENOSPC if there isn't room
1639 * for a complete record.
Robert Braggeec688e2016-11-07 19:49:47 +00001640 *
Robert Bragg16d98b32016-12-07 21:40:33 +00001641 * Return any error condition that results in a short read such as
1642 * -%ENOSPC or -%EFAULT, even though these may be squashed before
1643 * returning to userspace.
Robert Braggeec688e2016-11-07 19:49:47 +00001644 */
1645 int (*read)(struct i915_perf_stream *stream,
1646 char __user *buf,
1647 size_t count,
1648 size_t *offset);
1649
Robert Bragg16d98b32016-12-07 21:40:33 +00001650 /**
1651 * @destroy: Cleanup any stream specific resources.
Robert Braggeec688e2016-11-07 19:49:47 +00001652 *
1653 * The stream will always be disabled before this is called.
1654 */
1655 void (*destroy)(struct i915_perf_stream *stream);
1656};
1657
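/*
 * Illustrative pseudo-implementation of the @read contract above;
 * example_have_record(), example_record(), example_record_size() and
 * example_advance() are hypothetical stand-ins for a stream's buffered
 * records. Only whole records are copied, and -ENOSPC/-EFAULT are
 * reported for the caller to squash if some data was already copied.
 */
static inline int example_stream_read(struct i915_perf_stream *stream,
				      char __user *buf, size_t count,
				      size_t *offset)
{
	while (example_have_record(stream)) {
		size_t size = example_record_size(stream);

		/* Never split a record across a read(). */
		if (*offset + size > count)
			return -ENOSPC;

		if (copy_to_user(buf + *offset, example_record(stream), size))
			return -EFAULT;

		*offset += size;
		example_advance(stream);
	}

	return 0;
}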
Robert Bragg16d98b32016-12-07 21:40:33 +00001658/**
1659 * struct i915_perf_stream - state for a single open stream FD
1660 */
Robert Braggeec688e2016-11-07 19:49:47 +00001661struct i915_perf_stream {
Robert Bragg16d98b32016-12-07 21:40:33 +00001662 /**
1663 * @dev_priv: i915 drm device
1664 */
Robert Braggeec688e2016-11-07 19:49:47 +00001665 struct drm_i915_private *dev_priv;
1666
Robert Bragg16d98b32016-12-07 21:40:33 +00001667 /**
1668 * @link: Links the stream into ``&drm_i915_private->streams``
1669 */
Robert Braggeec688e2016-11-07 19:49:47 +00001670 struct list_head link;
1671
Robert Bragg16d98b32016-12-07 21:40:33 +00001672 /**
1673 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
1674 * properties given when opening a stream, representing the contents
1675 * of a single sample as read() by userspace.
1676 */
Robert Braggeec688e2016-11-07 19:49:47 +00001677 u32 sample_flags;
Robert Bragg16d98b32016-12-07 21:40:33 +00001678
1679 /**
1680 * @sample_size: Considering the configured contents of a sample
1681 * combined with the required header size, this is the total size
1682 * of a single sample record.
1683 */
Robert Braggd7965152016-11-07 19:49:52 +00001684 int sample_size;
Robert Braggeec688e2016-11-07 19:49:47 +00001685
Robert Bragg16d98b32016-12-07 21:40:33 +00001686 /**
1687 * @ctx: %NULL if measuring system-wide across all contexts or a
1688 * specific context that is being monitored.
1689 */
Robert Braggeec688e2016-11-07 19:49:47 +00001690 struct i915_gem_context *ctx;
Robert Bragg16d98b32016-12-07 21:40:33 +00001691
1692 /**
1693 * @enabled: Whether the stream is currently enabled, considering
1694 * whether the stream was opened in a disabled state and based
1695 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
1696 */
Robert Braggeec688e2016-11-07 19:49:47 +00001697 bool enabled;
1698
Robert Bragg16d98b32016-12-07 21:40:33 +00001699 /**
1700 * @ops: The callbacks providing the implementation of this specific
1701 * type of configured stream.
1702 */
Robert Braggd7965152016-11-07 19:49:52 +00001703 const struct i915_perf_stream_ops *ops;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001704
1705 /**
1706 * @oa_config: The OA configuration used by the stream.
1707 */
1708 struct i915_oa_config *oa_config;
Robert Braggd7965152016-11-07 19:49:52 +00001709};
1710
Robert Bragg16d98b32016-12-07 21:40:33 +00001711/**
1712 * struct i915_oa_ops - Gen specific implementation of an OA unit stream
1713 */
Robert Braggd7965152016-11-07 19:49:52 +00001714struct i915_oa_ops {
Robert Bragg16d98b32016-12-07 21:40:33 +00001715 /**
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01001716 * @is_valid_b_counter_reg: Validates a register's address for
1717 * programming boolean counters for a particular platform.
1718 */
1719 bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
1720 u32 addr);
1721
1722 /**
1723 * @is_valid_mux_reg: Validates a register's address for programming the mux
1724 * for a particular platform.
1725 */
1726 bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
1727
1728 /**
1729 * @is_valid_flex_reg: Validates a register's address for programming
1730 * flex EU filtering for a particular platform.
1731 */
1732 bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
1733
1734 /**
Robert Bragg16d98b32016-12-07 21:40:33 +00001735 * @init_oa_buffer: Resets the head and tail pointers of the
1736 * circular buffer for periodic OA reports.
1737 *
1738 * Called when first opening a stream for OA metrics, but also may be
1739 * called in response to an OA buffer overflow or other error
1740 * condition.
1741 *
1742 * Note it may be necessary to clear the full OA buffer here as part of
1743 * maintaining the invariant that new reports must be written to
1744 * zeroed memory for us to be able to reliably detect if an expected
1745 * report has not yet landed in memory. (At least on Haswell the OA
1746 * buffer tail pointer is not synchronized with reports being visible
1747 * to the CPU)
1748 */
Robert Braggd7965152016-11-07 19:49:52 +00001749 void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
Robert Bragg16d98b32016-12-07 21:40:33 +00001750
1751 /**
Robert Bragg19f81df2017-06-13 12:23:03 +01001752 * @enable_metric_set: Selects and applies any MUX configuration to set
1753 * up the Boolean and Custom (B/C) counters that are part of the
1754 * counter reports being sampled. May apply system constraints such as
Robert Bragg16d98b32016-12-07 21:40:33 +00001755 * disabling EU clock gating as required.
1756 */
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001757 int (*enable_metric_set)(struct drm_i915_private *dev_priv,
1758 const struct i915_oa_config *oa_config);
Robert Bragg16d98b32016-12-07 21:40:33 +00001759
1760 /**
1761 * @disable_metric_set: Remove system constraints associated with using
1762 * the OA unit.
1763 */
Robert Braggd7965152016-11-07 19:49:52 +00001764 void (*disable_metric_set)(struct drm_i915_private *dev_priv);
Robert Bragg16d98b32016-12-07 21:40:33 +00001765
1766 /**
1767 * @oa_enable: Enable periodic sampling
1768 */
Robert Braggd7965152016-11-07 19:49:52 +00001769 void (*oa_enable)(struct drm_i915_private *dev_priv);
Robert Bragg16d98b32016-12-07 21:40:33 +00001770
1771 /**
1772 * @oa_disable: Disable periodic sampling
1773 */
Robert Braggd7965152016-11-07 19:49:52 +00001774 void (*oa_disable)(struct drm_i915_private *dev_priv);
Robert Bragg16d98b32016-12-07 21:40:33 +00001775
1776 /**
1777 * @read: Copy data from the circular OA buffer into a given userspace
1778 * buffer.
1779 */
Robert Braggd7965152016-11-07 19:49:52 +00001780 int (*read)(struct i915_perf_stream *stream,
1781 char __user *buf,
1782 size_t count,
1783 size_t *offset);
Robert Bragg16d98b32016-12-07 21:40:33 +00001784
1785 /**
Robert Bragg19f81df2017-06-13 12:23:03 +01001786 * @oa_hw_tail_read: read the OA tail pointer register
Robert Bragg16d98b32016-12-07 21:40:33 +00001787 *
Robert Bragg19f81df2017-06-13 12:23:03 +01001788 * In particular this enables us to share all the fiddly code for
1789 * handling the OA unit tail pointer race that affects multiple
1790 * generations.
Robert Bragg16d98b32016-12-07 21:40:33 +00001791 */
Robert Bragg19f81df2017-06-13 12:23:03 +01001792 u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
Robert Braggeec688e2016-11-07 19:49:47 +00001793};
1794
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001795struct intel_cdclk_state {
Imre Deakb6c51c32018-01-17 19:25:08 +02001796 unsigned int cdclk, vco, ref, bypass;
Ville Syrjälä64600bd2017-10-24 12:52:08 +03001797 u8 voltage_level;
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001798};
1799
Jani Nikula77fec552014-03-31 14:27:22 +03001800struct drm_i915_private {
Chris Wilson8f460e22016-06-24 14:00:18 +01001801 struct drm_device drm;
1802
Chris Wilsonefab6d82015-04-07 16:20:57 +01001803 struct kmem_cache *objects;
Chris Wilsone20d2ab2015-04-07 16:20:58 +01001804 struct kmem_cache *vmas;
Chris Wilsond1b48c12017-08-16 09:52:08 +01001805 struct kmem_cache *luts;
Chris Wilsonefab6d82015-04-07 16:20:57 +01001806 struct kmem_cache *requests;
Chris Wilson52e54202016-11-14 20:41:02 +00001807 struct kmem_cache *dependencies;
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01001808 struct kmem_cache *priorities;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001809
Damien Lespiau5c969aa2014-02-07 19:12:48 +00001810 const struct intel_device_info info;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001811
Matthew Auld77894222017-12-11 15:18:18 +00001812 /**
1813 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
1814 * end of stolen which we can optionally use to create GEM objects
Matthew Auldb1ace602017-12-11 15:18:21 +00001815 * backed by stolen memory. Note that stolen_usable_size tells us
Matthew Auld77894222017-12-11 15:18:18 +00001816 * exactly how much of this we are actually allowed to use, given that
1817 * some portion of it is in fact reserved for use by hardware functions.
1818 */
1819 struct resource dsm;
Matthew Auld17a05342017-12-11 15:18:19 +00001820 /**
1821 * Reserved portion of Data Stolen Memory
1822 */
1823 struct resource dsm_reserved;
Matthew Auld77894222017-12-11 15:18:18 +00001824
Matthew Auldb1ace602017-12-11 15:18:21 +00001825 /*
1826 * Stolen memory is segmented in hardware with different portions
1827 * off-limits to certain functions.
1828 *
1829 * The drm_mm is initialised to the total accessible range, as found
1830 * from the PCI config. On Broadwell+, this is further restricted to
1831 * avoid the first page! The upper end of stolen memory is reserved for
1832 * hardware functions and similarly removed from the accessible range.
1833 */
Matthew Auldb7128ef2017-12-11 15:18:22 +00001834 resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
Matthew Auldb1ace602017-12-11 15:18:21 +00001835
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001836 void __iomem *regs;
1837
Chris Wilson907b28c2013-07-19 20:36:52 +01001838 struct intel_uncore uncore;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001839
Yu Zhangcf9d2892015-02-10 19:05:47 +08001840 struct i915_virtual_gpu vgpu;
1841
Zhenyu Wangfeddf6e2016-10-20 17:15:03 +08001842 struct intel_gvt *gvt;
Zhi Wang0ad35fe2016-06-16 08:07:00 -04001843
Anusha Srivatsabd1328582017-01-18 08:05:53 -08001844 struct intel_huc huc;
Alex Dai33a732f2015-08-12 15:43:36 +01001845 struct intel_guc guc;
1846
Daniel Vettereb805622015-05-04 14:58:44 +02001847 struct intel_csr csr;
1848
Jani Nikula5ea6e5e2015-04-01 10:55:04 +03001849 struct intel_gmbus gmbus[GMBUS_NUM_PINS];
Daniel Vetter28c70f12012-12-01 13:53:45 +01001850
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001851 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1852 * controller on different i2c buses. */
1853 struct mutex gmbus_mutex;
1854
1855 /**
1856 * Base address of the gmbus and gpio block.
1857 */
1858 uint32_t gpio_mmio_base;
1859
Shashank Sharmab6fdd0f2014-05-19 20:54:03 +05301860 /* MMIO base address for MIPI regs */
1861 uint32_t mipi_mmio_base;
1862
Ville Syrjälä443a3892015-11-11 20:34:15 +02001863 uint32_t psr_mmio_base;
1864
Imre Deak44cb7342016-08-10 14:07:29 +03001865 uint32_t pps_mmio_base;
1866
Daniel Vetter28c70f12012-12-01 13:53:45 +01001867 wait_queue_head_t gmbus_wait_queue;
1868
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001869 struct pci_dev *bridge_dev;
Akash Goel3b3f1652016-10-13 22:44:48 +05301870 struct intel_engine_cs *engine[I915_NUM_ENGINES];
Chris Wilsone7af3112017-10-03 21:34:48 +01001871 /* Context used internally to idle the GPU and set up initial state */
1872 struct i915_gem_context *kernel_context;
1873 /* Context only to be used for injecting preemption commands */
1874 struct i915_gem_context *preempt_context;
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +00001875 struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
1876 [MAX_ENGINE_INSTANCE + 1];
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001877
Daniel Vetterba8286f2014-09-11 07:43:25 +02001878 struct drm_dma_handle *status_page_dmah;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001879 struct resource mch_res;
1880
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001881 /* protects the irq masks */
1882 spinlock_t irq_lock;
1883
Imre Deakf8b79e52014-03-04 19:23:07 +02001884 bool display_irqs_enabled;
1885
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001886 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1887 struct pm_qos_request pm_qos;
1888
Ville Syrjäläa5805162015-05-26 20:42:30 +03001889 /* Sideband mailbox protection */
1890 struct mutex sb_lock;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001891
1892 /** Cached value of IMR to avoid reads in updating the bitfield */
Ben Widawskyabd58f02013-11-02 21:07:09 -07001893 union {
1894 u32 irq_mask;
1895 u32 de_irq_mask[I915_MAX_PIPES];
1896 };
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001897 u32 gt_irq_mask;
Akash Goelf4e9af42016-10-12 21:54:30 +05301898 u32 pm_imr;
1899 u32 pm_ier;
Deepak Sa6706b42014-03-15 20:23:22 +05301900 u32 pm_rps_events;
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05301901 u32 pm_guc_events;
Imre Deak91d181d2014-02-10 18:42:49 +02001902 u32 pipestat_irq_mask[I915_MAX_PIPES];
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001903
Jani Nikula5fcece82015-05-27 15:03:42 +03001904 struct i915_hotplug hotplug;
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001905 struct intel_fbc fbc;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301906 struct i915_drrs drrs;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001907 struct intel_opregion opregion;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03001908 struct intel_vbt_data vbt;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001909
Jesse Barnesd9ceb812014-10-09 12:57:43 -07001910 bool preserve_bios_swizzle;
1911
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001912 /* overlay */
1913 struct intel_overlay *overlay;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001914
Jani Nikula58c68772013-11-08 16:48:54 +02001915 /* backlight registers and fields in struct intel_panel */
Daniel Vetter07f11d42014-09-15 14:35:09 +02001916 struct mutex backlight_lock;
Jani Nikula31ad8ec2013-04-02 15:48:09 +03001917
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001918 /* LVDS info */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001919 bool no_aux_handshake;
1920
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001921 /* protects panel power sequencer state */
1922 struct mutex pps_mutex;
1923
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001924 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001925 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1926
1927 unsigned int fsb_freq, mem_freq, is_ddr3;
Ville Syrjäläb2045352016-05-13 23:41:27 +03001928 unsigned int skl_preferred_vco_freq;
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001929 unsigned int max_cdclk_freq;
Ville Syrjälä8d965612016-11-14 18:35:10 +02001930
Mika Kaholaadafdc62015-08-18 14:36:59 +03001931 unsigned int max_dotclk_freq;
Ville Syrjäläe7dc33f2016-03-02 17:22:13 +02001932 unsigned int rawclk_freq;
Ville Syrjälä6bcda4f2014-10-07 17:41:22 +03001933 unsigned int hpll_freq;
Chris Wilson58ecd9d2017-11-05 13:49:05 +00001934 unsigned int fdi_pll_freq;
Ville Syrjäläbfa7df02015-09-24 23:29:18 +03001935 unsigned int czclk_freq;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001936
Ville Syrjälä63911d72016-05-13 23:41:32 +03001937 struct {
Ville Syrjäläbb0f4aa2017-01-20 20:21:59 +02001938 /*
1939 * The current logical cdclk state.
1940 * See intel_atomic_state.cdclk.logical
1941 *
1942 * For reading holding any crtc lock is sufficient,
1943 * for writing must hold all of them.
1944 */
1945 struct intel_cdclk_state logical;
1946 /*
1947 * The current actual cdclk state.
1948 * See intel_atomic_state.cdclk.actual
1949 */
1950 struct intel_cdclk_state actual;
1951 /* The current hardware cdclk state */
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001952 struct intel_cdclk_state hw;
1953 } cdclk;
Ville Syrjälä63911d72016-05-13 23:41:32 +03001954
Daniel Vetter645416f2013-09-02 16:22:25 +02001955 /**
1956 * wq - Driver workqueue for GEM.
1957 *
1958 * NOTE: Work items scheduled here are not allowed to grab any modeset
1959 * locks, for otherwise the flushing done in the pageflip code will
1960 * result in deadlocks.
1961 */
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001962 struct workqueue_struct *wq;
1963
Ville Syrjälä757fffc2017-11-13 15:36:22 +02001964 /* ordered wq for modesets */
1965 struct workqueue_struct *modeset_wq;
1966
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001967 /* Display functions */
1968 struct drm_i915_display_funcs display;
1969
1970 /* PCH chipset type */
1971 enum intel_pch pch_type;
Paulo Zanoni17a303e2012-11-20 15:12:07 -02001972 unsigned short pch_id;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001973
1974 unsigned long quirks;
1975
Zhang Ruib8efb172013-02-05 15:41:53 +08001976 enum modeset_restore modeset_restore;
1977 struct mutex modeset_restore_lock;
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01001978 struct drm_atomic_state *modeset_restore_state;
Maarten Lankhorst73974892016-08-05 23:28:27 +03001979 struct drm_modeset_acquire_ctx reset_ctx;
Eric Anholt673a3942008-07-30 12:06:12 -07001980
Ben Widawskya7bbbd62013-07-16 16:50:07 -07001981 struct list_head vm_list; /* Global list of all address spaces */
Joonas Lahtinen62106b42016-03-18 10:42:57 +02001982 struct i915_ggtt ggtt; /* VM representing the global address space */
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001983
Daniel Vetter4b5aed62012-11-14 17:14:03 +01001984 struct i915_gem_mm mm;
Chris Wilsonad46cb52014-08-07 14:20:40 +01001985 DECLARE_HASHTABLE(mm_structs, 7);
1986 struct mutex mm_lock;
Daniel Vetter87813422012-05-02 11:49:32 +02001987
Zhi Wang43958902017-09-14 20:39:40 +08001988 struct intel_ppat ppat;
1989
Daniel Vetter87813422012-05-02 11:49:32 +02001990 /* Kernel Modesetting */
1991
Ville Syrjäläe2af48c2016-10-31 22:37:05 +02001992 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1993 struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05001994
Daniel Vetterc4597872013-10-21 21:04:07 +02001995#ifdef CONFIG_DEBUG_FS
1996 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1997#endif
1998
Maarten Lankhorst565602d2015-12-10 12:33:57 +01001999 /* dpll and cdclk state is protected by connection_mutex */
Daniel Vettere72f9fb2013-06-05 13:34:06 +02002000 int num_shared_dpll;
2001 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
Ander Conselvan de Oliveiraf9476a62016-03-08 17:46:22 +02002002 const struct intel_dpll_mgr *dpll_mgr;
Maarten Lankhorst565602d2015-12-10 12:33:57 +01002003
Maarten Lankhorstfbf6d872016-03-23 14:51:12 +01002004 /*
2005 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
2006 * Must be global rather than per dpll, because on some platforms
2007 * plls share registers.
2008 */
2009 struct mutex dpll_lock;
2010
Maarten Lankhorst565602d2015-12-10 12:33:57 +01002011 unsigned int active_crtcs;
Ville Syrjäläd305e062017-08-30 21:57:03 +03002012 /* minimum acceptable cdclk for each pipe */
2013 int min_cdclk[I915_MAX_PIPES];
Ville Syrjälä53e9bf52017-10-24 12:52:14 +03002014 /* minimum acceptable voltage level for each pipe */
2015 u8 min_voltage_level[I915_MAX_PIPES];
Maarten Lankhorst565602d2015-12-10 12:33:57 +01002016
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002017 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
Jesse Barnesee7b9f92012-04-20 17:11:53 +01002018
Mika Kuoppala72253422014-10-07 17:21:26 +03002019 struct i915_workarounds workarounds;
Arun Siluvery888b5992014-08-26 14:44:51 +01002020
Daniel Vetterf99d7062014-06-19 16:01:59 +02002021 struct i915_frontbuffer_tracking fb_tracking;
2022
Chris Wilsoneb955ee2017-01-23 21:29:39 +00002023 struct intel_atomic_helper {
2024 struct llist_head free_list;
2025 struct work_struct free_work;
2026 } atomic_helper;
2027
Jesse Barnes652c3932009-08-17 13:31:43 -07002028 u16 orig_clock;
Jesse Barnesf97108d2010-01-29 11:27:07 -08002029
Zhenyu Wangc48044112009-12-17 14:48:43 +08002030 bool mchbar_need_disable;
Jesse Barnesf97108d2010-01-29 11:27:07 -08002031
Daniel Vettera4da4fa2012-11-02 19:55:07 +01002032 struct intel_l3_parity l3_parity;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02002033
Ben Widawsky59124502013-07-04 11:02:05 -07002034 /* Cannot be determined by PCIID. You must always read a register. */
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002035 u32 edram_cap;
Ben Widawsky59124502013-07-04 11:02:05 -07002036
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01002037 /*
2038 * Protects RPS/RC6 register access and PCU communication.
2039 * Must be taken after struct_mutex if nested. Note that
2040 * this lock may be held for long periods of time when
2041 * talking to hw - so only take it when talking to hw!
2042 */
2043 struct mutex pcu_lock;
2044
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002045 /* gen6+ GT PM state */
2046 struct intel_gen6_power_mgmt gt_pm;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02002047
Daniel Vetter20e4d402012-08-08 23:35:39 +02002048 /* ilk-only ips/rps state. Everything in here is protected by the global
2049 * mchdev_lock in intel_pm.c */
Daniel Vetterc85aa882012-11-02 19:55:03 +01002050 struct intel_ilk_power_mgmt ips;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08002051
Imre Deak83c00f52013-10-25 17:36:47 +03002052 struct i915_power_domains power_domains;
Wang Xingchaoa38911a2013-05-30 22:07:11 +08002053
Rodrigo Vivia031d702013-10-03 16:15:06 -03002054 struct i915_psr psr;
Rodrigo Vivi3f51e472013-07-11 18:45:00 -03002055
Daniel Vetter99584db2012-11-14 17:14:04 +01002056 struct i915_gpu_error gpu_error;
Chris Wilsonae681d92010-10-01 14:57:56 +01002057
Jesse Barnesc9cddff2013-05-08 10:45:13 -07002058 struct drm_i915_gem_object *vlv_pctx;
2059
Dave Airlie8be48d92010-03-30 05:34:14 +00002060 /* list of fbdev register on this device */
2061 struct intel_fbdev *fbdev;
Chris Wilson82e3b8c2014-08-13 13:09:46 +01002062 struct work_struct fbdev_suspend_work;
Chris Wilsone953fd72011-02-21 22:23:52 +00002063
2064 struct drm_property *broadcast_rgb_property;
Chris Wilson3f43c482011-05-12 22:17:24 +01002065 struct drm_property *force_audio_property;
Ben Widawskye3689192012-05-25 16:56:22 -07002066
Imre Deak58fddc22015-01-08 17:54:14 +02002067 /* hda/i915 audio component */
David Henningsson51e1d832015-08-19 10:48:56 +02002068 struct i915_audio_component *audio_component;
Imre Deak58fddc22015-01-08 17:54:14 +02002069 bool audio_component_registered;
Libin Yang4a21ef72015-09-02 14:11:39 +08002070 /**
2071 * av_mutex - mutex for audio/video sync
2072 *
2073 */
2074 struct mutex av_mutex;
Imre Deak58fddc22015-01-08 17:54:14 +02002075
Chris Wilson829a0af2017-06-20 12:05:45 +01002076 struct {
2077 struct list_head list;
Chris Wilson5f09a9c2017-06-20 12:05:46 +01002078 struct llist_head free_list;
2079 struct work_struct free_work;
Chris Wilson829a0af2017-06-20 12:05:45 +01002080
2081 /* The hw wants to have a stable context identifier for the
2082 * lifetime of the context (for OA, PASID, faults, etc).
2083 * This is limited in execlists to 21 bits.
2084 */
2085 struct ida hw_ida;
2086#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
2087 } contexts;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01002088
Damien Lespiau3e683202012-12-11 18:48:29 +00002089 u32 fdi_rx_config;
Paulo Zanoni68d18ad2012-12-01 12:04:26 -02002090
Ville Syrjäläc2317752016-03-15 16:39:56 +02002091 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
Ville Syrjälä70722462015-04-10 18:21:28 +03002092 u32 chv_phy_control;
Ville Syrjäläc2317752016-03-15 16:39:56 +02002093 /*
2094 * Shadows for CHV DPLL_MD regs to keep the state
2095 * checker somewhat working in the presence of hardware
2096 * crappiness (can't read out DPLL_MD for pipes B & C).
2097 */
2098 u32 chv_dpll_md[I915_MAX_PIPES];
Imre Deakadc7f042016-04-04 17:27:10 +03002099 u32 bxt_phy_grc;
Ville Syrjälä70722462015-04-10 18:21:28 +03002100
Daniel Vetter842f1c82014-03-10 10:01:44 +01002101 u32 suspend_count;
Imre Deakbc872292015-11-18 17:32:30 +02002102 bool suspended_to_idle;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01002103 struct i915_suspend_saved_registers regfile;
Imre Deakddeea5b2014-05-05 15:19:56 +03002104 struct vlv_s0ix_state vlv_s0ix_state;
Daniel Vetter231f42a2012-11-02 19:55:05 +01002105
Lyude656d1b82016-08-17 15:55:54 -04002106 enum {
Paulo Zanoni16dcdc42016-09-22 18:00:27 -03002107 I915_SAGV_UNKNOWN = 0,
2108 I915_SAGV_DISABLED,
2109 I915_SAGV_ENABLED,
2110 I915_SAGV_NOT_CONTROLLED
2111 } sagv_status;
Lyude656d1b82016-08-17 15:55:54 -04002112
Ville Syrjälä53615a52013-08-01 16:18:50 +03002113 struct {
2114 /*
2115 * Raw watermark latency values:
2116 * in 0.1us units for WM0,
2117 * in 0.5us units for WM1+.
2118 */
2119 /* primary */
2120 uint16_t pri_latency[5];
2121 /* sprite */
2122 uint16_t spr_latency[5];
2123 /* cursor */
2124 uint16_t cur_latency[5];
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002125 /*
2126 * Raw watermark memory latency values
2127 * for SKL for all 8 levels
2128 * in 1us units.
2129 */
2130 uint16_t skl_latency[8];
Ville Syrjälä609cede2013-10-09 19:18:03 +03002131
2132 /* current hardware state */
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00002133 union {
2134 struct ilk_wm_values hw;
2135 struct skl_wm_values skl_hw;
Ville Syrjälä0018fda2015-03-05 21:19:45 +02002136 struct vlv_wm_values vlv;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03002137 struct g4x_wm_values g4x;
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00002138 };
Ville Syrjälä58590c12015-09-08 21:05:12 +03002139
2140 uint8_t max_level;
Matt Ropered4a6a72016-02-23 17:20:13 -08002141
2142 /*
2143 * Should be held around atomic WM register writing; also
2144 * protects intel_crtc->wm.active and
2145 * cstate->wm.need_postvbl_update.
2146 */
2147 struct mutex wm_mutex;
Matt Roper279e99d2016-05-12 07:06:02 -07002148
2149 /*
2150 * Set during HW readout of watermarks/DDB. Some platforms
2151 * need to know when we're still using BIOS-provided values
2152 * (which we don't fully trust).
2153 */
2154 bool distrust_bios_wm;
Ville Syrjälä53615a52013-08-01 16:18:50 +03002155 } wm;
2156
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01002157 struct i915_runtime_pm runtime_pm;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002158
Robert Braggeec688e2016-11-07 19:49:47 +00002159 struct {
2160 bool initialized;
Robert Braggd7965152016-11-07 19:49:52 +00002161
Robert Bragg442b8c02016-11-07 19:49:53 +00002162 struct kobject *metrics_kobj;
Robert Braggccdf6342016-11-07 19:49:54 +00002163 struct ctl_table_header *sysctl_header;
Robert Bragg442b8c02016-11-07 19:49:53 +00002164
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002165 /*
2166 * Lock associated with adding/modifying/removing OA configs
2167 * in dev_priv->perf.metrics_idr.
2168 */
2169 struct mutex metrics_lock;
2170
2171 /*
2172 * List of dynamic configurations, you need to hold
2173 * dev_priv->perf.metrics_lock to access it.
2174 */
2175 struct idr metrics_idr;
2176
2177 /*
2178 * Lock associated with anything below within this structure
2179 * except exclusive_stream.
2180 */
Robert Braggeec688e2016-11-07 19:49:47 +00002181 struct mutex lock;
2182 struct list_head streams;
Robert Bragg8a3003d2016-11-07 19:49:51 +00002183
2184 struct {
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002185 /*
2186 * The stream currently using the OA unit. If accessed
2187 * outside a syscall associated to its file
2188 * descriptor, you need to hold
2189 * dev_priv->drm.struct_mutex.
2190 */
Robert Braggd7965152016-11-07 19:49:52 +00002191 struct i915_perf_stream *exclusive_stream;
2192
2193 u32 specific_ctx_id;
Robert Braggd7965152016-11-07 19:49:52 +00002194
2195 struct hrtimer poll_check_timer;
2196 wait_queue_head_t poll_wq;
2197 bool pollin;
2198
Robert Bragg712122e2017-05-11 16:43:31 +01002199 /**
2200 * For rate limiting any notifications of spurious
2201 * invalid OA reports
2202 */
2203 struct ratelimit_state spurious_report_rs;
2204
Robert Braggd7965152016-11-07 19:49:52 +00002205 bool periodic;
2206 int period_exponent;
Robert Braggd7965152016-11-07 19:49:52 +00002207
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002208 struct i915_oa_config test_config;
Robert Braggd7965152016-11-07 19:49:52 +00002209
2210 struct {
2211 struct i915_vma *vma;
2212 u8 *vaddr;
Robert Bragg19f81df2017-06-13 12:23:03 +01002213 u32 last_ctx_id;
Robert Braggd7965152016-11-07 19:49:52 +00002214 int format;
2215 int format_size;
Robert Braggf2790202017-05-11 16:43:26 +01002216
2217 /**
Robert Bragg0dd860c2017-05-11 16:43:28 +01002218 * Locks reads and writes to all head/tail state
2219 *
2220 * Consider: the head and tail pointer state
2221 * needs to be read consistently from a hrtimer
2222 * callback (atomic context) and read() fop
2223 * (user context) with tail pointer updates
2224 * happening in atomic context and head updates
2225 * in user context and the (unlikely)
2226 * possibility of read() errors needing to
2227 * reset all head/tail state.
2228 *
2229 * Note: Contention or performance aren't
2230 * currently a significant concern here
2231 * considering the relatively low frequency of
2232 * hrtimer callbacks (5ms period) and that
2233 * reads typically only happen in response to a
2234 * hrtimer event and likely complete before the
2235 * next callback.
2236 *
2237 * Note: This lock is not held *while* reading
2238 * and copying data to userspace so the value
2239 * of head observed in hrtimer callbacks won't
2240 * represent any partial consumption of data.
2241 */
2242 spinlock_t ptr_lock;
2243
2244 /**
2245 * One 'aging' tail pointer and one 'aged'
2246 * tail pointer ready to be used for reading.
2247 *
2248 * Initial values of 0xffffffff are invalid
2249 * and imply that an update is required
2250 * (and should be ignored by an attempted
2251 * read)
2252 */
2253 struct {
2254 u32 offset;
2255 } tails[2];
2256
2257 /**
2258 * Index for the aged tail ready to read()
2259 * data up to.
2260 */
2261 unsigned int aged_tail_idx;
2262
2263 /**
2264 * A monotonic timestamp for when the current
2265 * aging tail pointer was read; used to
2266 * determine when it is old enough to trust.
2267 */
2268 u64 aging_timestamp;
2269
2270 /**
Robert Braggf2790202017-05-11 16:43:26 +01002271 * Although we can always read back the head
2272 * pointer register, we prefer to avoid
2273 * trusting the HW state, just to avoid any
2274 * risk that some hardware condition could
2275 * somehow bump the head pointer unpredictably
2276 * and cause us to forward the wrong OA buffer
2277 * data to userspace.
2278 */
2279 u32 head;
Robert Braggd7965152016-11-07 19:49:52 +00002280 } oa_buffer;
2281
2282 u32 gen7_latched_oastatus1;
Robert Bragg19f81df2017-06-13 12:23:03 +01002283 u32 ctx_oactxctrl_offset;
2284 u32 ctx_flexeu0_offset;
2285
2286 /**
2287 * The RPT_ID/reason field for Gen8+ includes a bit
2288 * to determine if the CTX ID in the report is valid
2289 * but the specific bit differs between Gen 8 and 9
2290 */
2291 u32 gen8_valid_ctx_bit;
Robert Braggd7965152016-11-07 19:49:52 +00002292
2293 struct i915_oa_ops ops;
2294 const struct i915_oa_format *oa_formats;
Robert Bragg8a3003d2016-11-07 19:49:51 +00002295 } oa;
Robert Braggeec688e2016-11-07 19:49:47 +00002296 } perf;
2297
Oscar Mateoa83014d2014-07-24 17:04:21 +01002298 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
2299 struct {
Chris Wilson821ed7d2016-09-09 14:11:53 +01002300 void (*resume)(struct drm_i915_private *);
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00002301 void (*cleanup_engine)(struct intel_engine_cs *engine);
Chris Wilson67d97da2016-07-04 08:08:31 +01002302
Chris Wilson73cb9702016-10-28 13:58:46 +01002303 struct list_head timelines;
2304 struct i915_gem_timeline global_timeline;
Chris Wilson28176ef2016-10-28 13:58:56 +01002305 u32 active_requests;
Chris Wilson73cb9702016-10-28 13:58:46 +01002306
Chris Wilson67d97da2016-07-04 08:08:31 +01002307 /**
2308 * Is the GPU currently considered idle, or busy executing
2309 * userspace requests? Whilst idle, we allow runtime power
2310 * management to power down the hardware and display clocks.
2311 * In order to reduce the effect on performance, there
2312 * is a slight delay before we do so.
2313 */
Chris Wilson67d97da2016-07-04 08:08:31 +01002314 bool awake;
2315
2316 /**
Chris Wilson6f561032018-01-24 11:36:07 +00002317 * The number of times we have woken up.
2318 */
2319 unsigned int epoch;
2320#define I915_EPOCH_INVALID 0
2321
2322 /**
Chris Wilson67d97da2016-07-04 08:08:31 +01002323 * We leave the user IRQ off as much as possible,
2324 * but this means that requests will finish and never
2325 * be retired once the system goes idle. Set a timer to
2326 * fire periodically while the ring is running. When it
2327 * fires, go retire requests.
2328 */
2329 struct delayed_work retire_work;
2330
2331 /**
2332 * When we detect an idle GPU, we want to turn on
2333 * powersaving features. So once we see that there
2334 * are no more requests outstanding and no more
2335 * arrive within a small period of time, we fire
2336 * off the idle_work.
2337 */
2338 struct delayed_work idle_work;
Chris Wilsonde867c22016-10-25 13:16:02 +01002339
2340 ktime_t last_init_time;
Oscar Mateoa83014d2014-07-24 17:04:21 +01002341 } gt;
2342
Ville Syrjälä3be60de2015-09-08 18:05:45 +03002343 /* perform PHY state sanity checks? */
2344 bool chv_phy_assert[2];
2345
Mahesh Kumara3a89862016-12-01 21:19:34 +05302346 bool ipc_enabled;
2347
Pandiyan, Dhinakaranf9318942016-09-21 13:02:48 -07002348 /* Used to save the pipe-to-encoder mapping for audio */
2349 struct intel_encoder *av_enc_map[I915_MAX_PIPES];
Takashi Iwai0bdf5a02015-11-30 18:19:39 +01002350
Jerome Anandeef57322017-01-25 04:27:49 +05302351 /* necessary resource sharing with HDMI LPE audio driver. */
2352 struct {
2353 struct platform_device *platdev;
2354 int irq;
2355 } lpe_audio;
2356
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +00002357 struct i915_pmu pmu;
2358
Daniel Vetterbdf1e7e2014-05-21 17:37:52 +02002359 /*
2360 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
2361 * will be rejected. Instead look for a better place.
2362 */
Jani Nikula77fec552014-03-31 14:27:22 +03002363};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364
Chris Wilson2c1792a2013-08-01 18:39:55 +01002365static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2366{
Chris Wilson091387c2016-06-24 14:00:21 +01002367 return container_of(dev, struct drm_i915_private, drm);
Chris Wilson2c1792a2013-08-01 18:39:55 +01002368}
2369
David Weinehallc49d13e2016-08-22 13:32:42 +03002370static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
Imre Deak888d0d42015-01-08 17:54:13 +02002371{
David Weinehallc49d13e2016-08-22 13:32:42 +03002372 return to_i915(dev_get_drvdata(kdev));
Imre Deak888d0d42015-01-08 17:54:13 +02002373}
2374
Alex Dai33a732f2015-08-12 15:43:36 +01002375static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2376{
2377 return container_of(guc, struct drm_i915_private, guc);
2378}
2379
Arkadiusz Hiler50beba52017-03-14 15:28:06 +01002380static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
2381{
2382 return container_of(huc, struct drm_i915_private, huc);
2383}
2384
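/*
 * Illustrative note: each converter above is the inverse of taking the
 * embedded member's address, e.g. to_i915(&dev_priv->drm) == dev_priv and
 * guc_to_i915(&dev_priv->guc) == dev_priv, because every member is
 * embedded by value in struct drm_i915_private and container_of() just
 * subtracts the member's offset.
 */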
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002385/* Simple iterator over all initialised engines */
Akash Goel3b3f1652016-10-13 22:44:48 +05302386#define for_each_engine(engine__, dev_priv__, id__) \
2387 for ((id__) = 0; \
2388 (id__) < I915_NUM_ENGINES; \
2389 (id__)++) \
2390 for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
Dave Gordonc3232b12016-03-23 18:19:53 +00002391
2392/* Iterator over subset of engines selected by mask */
Chris Wilsonbafb0fc2016-08-27 08:54:01 +01002393#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2394 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
Akash Goel3b3f1652016-10-13 22:44:48 +05302395 tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
Mika Kuoppalaee4b6fa2016-03-16 17:54:00 +02002396
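/*
 * Illustrative sketch: typical use of the iterator above; the helper
 * example_log_engines() is hypothetical, the rest is the real API.
 */
static inline void example_log_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		DRM_DEBUG_DRIVER("engine %d: %s\n", id, engine->name);
}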
Wu Fengguangb1d7e4b2012-02-14 11:45:36 +08002397enum hdmi_force_audio {
2398 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
2399 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
2400 HDMI_AUDIO_AUTO, /* trust EDID */
2401 HDMI_AUDIO_ON, /* force turn on HDMI audio */
2402};
2403
Daniel Vetter190d6cd2013-07-04 13:06:28 +02002404#define I915_GTT_OFFSET_NONE ((u32)-1)
Chris Wilsoned2f3452012-11-15 11:32:19 +00002405
Daniel Vettera071fa02014-06-18 23:28:09 +02002406/*
2407 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302408 * considered to be the frontbuffer for the given plane interface-wise. This
Daniel Vettera071fa02014-06-18 23:28:09 +02002409 * doesn't mean that the hw necessarily already scans it out, but that any
2410 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2411 *
2412 * We have one bit per pipe and per scanout plane type.
2413 */
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302414#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
Ville Syrjäläc19e1122018-01-23 20:33:43 +02002415#define INTEL_FRONTBUFFER(pipe, plane_id) \
2416 (1 << ((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
Daniel Vettera071fa02014-06-18 23:28:09 +02002417#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
Ville Syrjäläc19e1122018-01-23 20:33:43 +02002418 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
Daniel Vettercc365132014-06-18 13:59:13 +02002419#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05302420 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
Daniel Vettera071fa02014-06-18 23:28:09 +02002421
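/*
 * Worked example (illustrative): with 8 bits per pipe, plane 0 of pipe B
 * (pipe index 1) gives INTEL_FRONTBUFFER(PIPE_B, 0) == 1 << 8, the pipe B
 * overlay gives INTEL_FRONTBUFFER_OVERLAY(PIPE_B) == 1 << 15, and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xff00.
 */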
Dave Gordon85d12252016-05-20 11:54:06 +01002422/*
2423 * Optimised SGL iterator for GEM objects
2424 */
2425static __always_inline struct sgt_iter {
2426 struct scatterlist *sgp;
2427 union {
2428 unsigned long pfn;
2429 dma_addr_t dma;
2430 };
2431 unsigned int curr;
2432 unsigned int max;
2433} __sgt_iter(struct scatterlist *sgl, bool dma) {
2434 struct sgt_iter s = { .sgp = sgl };
2435
2436 if (s.sgp) {
2437 s.max = s.curr = s.sgp->offset;
2438 s.max += s.sgp->length;
2439 if (dma)
2440 s.dma = sg_dma_address(s.sgp);
2441 else
2442 s.pfn = page_to_pfn(sg_page(s.sgp));
2443 }
2444
2445 return s;
2446}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise just return the pointer to that next element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
 * @__dmap: DMA address (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 */
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
	for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
	     ((__dmap) = (__iter).dma + (__iter).curr); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp: page pointer (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt) \
	for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
	     ((__pp) = (__iter).pfn == 0 ? NULL : \
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
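
/*
 * Example (illustrative sketch): counting the CPU pages of a backing
 * store with for_each_sgt_page(). The iterator advances in PAGE_SIZE
 * steps across each segment, chaining to the next segment as needed.
 * The helper name is hypothetical.
 */
static inline unsigned long i915_example_count_backing_pages(struct sg_table *sgt)
{
	struct sgt_iter iter;
	struct page *page;
	unsigned long n = 0;

	for_each_sgt_page(page, iter, sgt)
		n++;

	return n;
}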

static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
		page_sizes |= sg->length;
		sg = __sg_next(sg);
	}

	return page_sizes;
}
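
/*
 * Worked example (illustrative): because each segment length is ORed in,
 * a table made of one 64KiB segment and one 4KiB segment yields
 * 0x10000 | 0x1000 == 0x11000, i.e. the result records which length
 * bits appeared across the table, not a total size.
 */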

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();

	if (size == 0)
		return SCATTERLIST_MAX_SEGMENT;

	size = rounddown(size, PAGE_SIZE);
	/* swiotlb_max_segment() can return 1 byte when it means one page. */
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return size;
}

static inline const struct intel_device_info *
intel_info(const struct drm_i915_private *dev_priv)
{
	return &dev_priv->info;
}

#define INTEL_INFO(dev_priv)	intel_info((dev_priv))

#define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)

#define REVID_FOREVER		0xff
#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)

#define GEN_FOREVER (0)

#define INTEL_GEN_MASK(s, e) ( \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
	GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
		(s) != GEN_FOREVER ? (s) - 1 : 0) \
)

/*
 * Returns true if Gen is in the inclusive range [Start, End].
 *
 * Use GEN_FOREVER for an unbound start and/or end.
 */
#define IS_GEN(dev_priv, s, e) \
	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))

/*
 * Return true if the revision is in the inclusive range [since, until].
 *
 * Use 0 for an open-ended since, and REVID_FOREVER for an open-ended until.
 */
#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
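
/*
 * Example (illustrative): gating a hypothetical workaround on a gen
 * range plus a revision cut-off, using IS_GEN() above and the
 * platform/revision helpers defined further below:
 *
 *	if (IS_GEN(dev_priv, 11, GEN_FOREVER) ||
 *	    IS_SKL_REVID(dev_priv, 0, SKL_REVID_C0))
 *		apply_hypothetical_workaround(dev_priv);
 */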

#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 (dev_priv)->info.gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
				  (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
				  (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
#define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 (dev_priv)->info.gt == 3)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
				 INTEL_DEVID(dev_priv) == 0x0A1E)
#define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
				 INTEL_DEVID(dev_priv) == 0x1913 || \
				 INTEL_DEVID(dev_priv) == 0x1916 || \
				 INTEL_DEVID(dev_priv) == 0x1921 || \
				 INTEL_DEVID(dev_priv) == 0x1926)
#define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
				 INTEL_DEVID(dev_priv) == 0x1915 || \
				 INTEL_DEVID(dev_priv) == 0x191E)
#define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
				 INTEL_DEVID(dev_priv) == 0x5913 || \
				 INTEL_DEVID(dev_priv) == 0x5916 || \
				 INTEL_DEVID(dev_priv) == 0x5921 || \
				 INTEL_DEVID(dev_priv) == 0x5926)
#define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
				 INTEL_DEVID(dev_priv) == 0x5915 || \
				 INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (dev_priv)->info.gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
				      (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)

#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)

#define SKL_REVID_A0		0x0
#define SKL_REVID_B0		0x1
#define SKL_REVID_C0		0x2
#define SKL_REVID_D0		0x3
#define SKL_REVID_E0		0x4
#define SKL_REVID_F0		0x5
#define SKL_REVID_G0		0x6
#define SKL_REVID_H0		0x7

#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))

#define BXT_REVID_A0		0x0
#define BXT_REVID_A1		0x1
#define BXT_REVID_B0		0x3
#define BXT_REVID_B_LAST	0x8
#define BXT_REVID_C0		0x9

#define IS_BXT_REVID(dev_priv, since, until) \
	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))

#define KBL_REVID_A0		0x0
#define KBL_REVID_B0		0x1
#define KBL_REVID_C0		0x2
#define KBL_REVID_D0		0x3
#define KBL_REVID_E0		0x4

#define IS_KBL_REVID(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define GLK_REVID_A0		0x0
#define GLK_REVID_A1		0x1

#define IS_GLK_REVID(dev_priv, since, until) \
	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define CNL_REVID_A0		0x0
#define CNL_REVID_B0		0x1
#define CNL_REVID_C0		0x2

#define IS_CNL_REVID(p, since, until) \
	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
#define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
#define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
#define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
#define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
#define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
#define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
#define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))
#define IS_GEN11(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(10)))

#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))

#define ENGINE_MASK(id)	BIT(id)
#define RENDER_RING	ENGINE_MASK(RCS)
#define BSD_RING	ENGINE_MASK(VCS)
#define BLT_RING	ENGINE_MASK(BCS)
#define VEBOX_RING	ENGINE_MASK(VECS)
#define BSD2_RING	ENGINE_MASK(VCS2)
#define ALL_ENGINES	(~0)

#define HAS_ENGINE(dev_priv, id) \
	(!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))

#define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
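
/*
 * Example (illustrative sketch): optional engines must be tested for
 * before use. The helper below is hypothetical and simply mirrors how
 * callers guard access to the second video decode engine.
 */
static inline struct intel_engine_cs *
i915_example_bsd2_or_null(struct drm_i915_private *i915)
{
	return HAS_BSD2(i915) ? i915->engine[VCS2] : NULL;
}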

#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)

#define HAS_LLC(dev_priv)	((dev_priv)->info.has_llc)
#define HAS_SNOOP(dev_priv)	((dev_priv)->info.has_snoop)
#define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				  IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))

#define HWS_NEEDS_PHYSICAL(dev_priv)	((dev_priv)->info.hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		((dev_priv)->info.has_logical_ring_contexts)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
		((dev_priv)->info.has_logical_ring_preemption)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
})
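
/*
 * Example (illustrative sketch): probing for mixed 4K/64K GTT page
 * support before taking a huge-page path. The helper is hypothetical;
 * the I915_GTT_PAGE_SIZE_* flags are assumed to come from
 * i915_gem_gtt.h, included earlier in this header.
 */
static inline bool i915_example_supports_64k_pages(struct drm_i915_private *i915)
{
	return HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_4K | I915_GTT_PAGE_SIZE_64K);
}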

#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		((dev_priv)->info.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

/*
 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
 * even when in MSI mode. This results in spurious interrupt warnings if the
 * legacy irq no. is shared with another device. The kernel then disables that
 * interrupt source and so prevents the other device from working properly.
 *
 * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
 * interrupts.
 */
#define HAS_AUX_IRQ(dev_priv)   true
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
					 !(IS_I915G(dev_priv) || \
					   IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)

#define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
#define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)

#define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)

#define HAS_RUNTIME_PM(dev_priv)  ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)

#define HAS_IPC(dev_priv)	((dev_priv)->info.has_ipc)

/*
 * For now, anything with a GuC requires uCode loading, and then supports
 * command submission once loaded. But these are logically independent
 * properties, so we have separate macros to test them.
 */
#define HAS_GUC(dev_priv)	((dev_priv)->info.has_guc)
#define HAS_GUC_CT(dev_priv)	((dev_priv)->info.has_guc_ct)
#define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))

/* For now, anything with a GuC also has a HuC */
#define HAS_HUC(dev_priv)	(HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))

/* Having a GuC is not the same as using a GuC */
#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
#define USES_HUC(dev_priv)		intel_uc_is_using_huc()

#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)

#define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)

#define INTEL_PCH_DEVICE_ID_MASK		0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_WPT_DEVICE_ID_TYPE		0x8c80
#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE		0x9c80
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
#define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */

#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_CNP_LP(dev_priv) \
	((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
	((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
	 (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
	((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
	 (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
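
/*
 * Example (illustrative): PCH checks are usually ordered newest to
 * oldest, falling back to the split-PCH catch-all, e.g.:
 *
 *	if (HAS_PCH_CNP(dev_priv))
 *		...;
 *	else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
 *		...;
 *	else if (HAS_PCH_SPLIT(dev_priv))
 *		...;
 */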

#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)

#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#include "i915_trace.h"

static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return IS_BROXTON(dev_priv) && intel_vtd_active();
}
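
/*
 * Example (illustrative): these predicates gate VT-d workarounds at
 * their call sites; e.g. a GGTT insertion path can pick a serialised
 * update when the Broxton workaround is required. The helper named
 * below is hypothetical:
 *
 *	if (intel_ggtt_update_needs_vtd_wa(dev_priv))
 *		use_serialised_ggtt_updates(dev_priv);
 */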

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt);

/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...) \
	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#else
#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;

extern int i915_driver_load(struct pci_dev *pdev,
			    const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);

#define I915_RESET_QUIET BIT(0)
extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
extern int i915_reset_engine(struct intel_engine_cs *engine,
			     unsigned int flags);

extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_reset_guc(struct drm_i915_private *dev_priv);
extern int intel_guc_reset_engine(struct intel_guc *guc,
				  struct intel_engine_cs *engine);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);

int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);

/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
enum port intel_hpd_pin_to_port(enum hpd_pin pin);
enum hpd_pin intel_hpd_pin(enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);

/* i915_irq.c */
static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
	unsigned long delay;

	if (unlikely(!i915_modparams.enable_hangcheck))
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */

	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
	queue_delayed_work(system_long_wq,
			   &dev_priv->gpu_error.hangcheck_work, delay);
}

__printf(3, 4)
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...);

extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);

static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->gvt;
}

static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.active;
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask);
static inline void
ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
				       enum pipe pipe, uint32_t bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
					enum pipe pipe, uint32_t bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask);
static inline void
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ibx_display_interrupt_update(dev_priv, bits, 0);
}

/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_load_init(struct drm_i915_private *dev_priv);
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
				 const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/* A single pass should suffice to release all the freed objects (along
	 * most call paths), but be a little more paranoid in that freeing
	 * the objects does take a little time, during which the rcu
	 * callbacks could have added new objects into the freed list, and
	 * armed the work again.
	 */
	do {
		rcu_barrier();
	} while (flush_work(&i915->mm.free_work));
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to the objects above (see i915_gem_drain_freed_objects), in
	 * general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to
	 * drain the workqueue a second time after waiting for the RCU
	 * grace period so that we catch work queued via RCU from the first
	 * pass. As neither drain_workqueue() nor flush_workqueue() report
	 * a result, we assume that we don't require more than 2 passes to
	 * catch all recursive RCU delayed work.
	 */
	int pass = 2;
	do {
		rcu_barrier();
		drain_workqueue(i915->wq);
	} while (--pass);
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER
};

void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
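
/*
 * Example (illustrative sketch): the canonical pin_map/unpin_map bracket
 * for CPU access to an object's backing store. The helper below is
 * hypothetical and keeps error handling minimal.
 */
static inline int i915_example_clear_object(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return 0;
}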

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush);
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
int i915_gem_mmap_gtt_version(void);

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);

void i915_gem_retire_requests(struct drm_i915_private *dev_priv);

static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
}

static inline bool i915_reset_handoff(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_WEDGED, &error->flags));
}

static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
{
	return i915_reset_backoff(error) | i915_terminally_wedged(error);
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return READ_ONCE(error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  struct intel_engine_cs *engine)
{
	return READ_ONCE(error->reset_engine_count[engine->id]);
}
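
/*
 * Example (illustrative sketch): sample the global reset count before an
 * operation and compare afterwards to detect whether a GPU reset
 * happened in between. The helper is hypothetical.
 */
static inline bool i915_example_reset_occurred(struct i915_gpu_error *error,
					       u32 count_before)
{
	return i915_reset_count(error) != count_before;
}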
3353
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003354struct drm_i915_gem_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
			   struct drm_i915_gem_request *request);

void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
			   unsigned int flags);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout,
			 struct intel_rps_client *rps);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  int priority);
#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	return container_of(vm, struct i915_hw_ppgtt, base);
}

/* i915_gem_fence_reg.c */
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);

void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);

void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				       struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
					 struct sg_table *pages);

static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return idr_find(&file_priv->context_idr, id);
}

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
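
/*
 * Usage sketch (illustrative only, not part of the driver): a successful
 * i915_gem_context_lookup() returns the context with its refcount raised,
 * so the caller must balance it with i915_gem_context_put() once done.
 * The function below is a hypothetical example of that pairing.
 *
 *	static int example_use_context(struct drm_i915_file_private *fpriv,
 *				       u32 id)
 *	{
 *		struct i915_gem_context *ctx;
 *
 *		ctx = i915_gem_context_lookup(fpriv, id);
 *		if (!ctx)
 *			return -ENOENT;
 *
 *		... use ctx under our own reference ...
 *
 *		i915_gem_context_put(ctx);
 *		return 0;
 *	}
 */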

static inline struct intel_timeline *
i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;

	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	return &vm->timeline.engine[engine->id];
}

int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file);
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
			    struct i915_gem_context *ctx,
			    uint32_t *reg_state);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned cache_level,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
	wmb();
	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start,
					 u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *i915,
			      unsigned long target,
			      unsigned long *nr_scanned,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
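
/*
 * Usage sketch (illustrative only): a caller typically asks the shrinker
 * to release a number of pages from bound and/or unbound objects, combining
 * the flags above. The target of 128 pages below is a hypothetical value.
 *
 *	unsigned long nr_scanned = 0;
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(i915, 128, &nr_scanned,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 */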

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
	       i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif

/* i915_gpu_error.c */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);

static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}

struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return NULL;
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

#endif

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_perf.c */
extern void i915_perf_init(struct drm_i915_private *dev_priv);
extern void i915_perf_fini(struct drm_i915_private *dev_priv);
extern void i915_perf_register(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister(struct drm_i915_private *dev_priv);

/* i915_suspend.c */
extern int i915_save_state(struct drm_i915_private *dev_priv);
extern int i915_restore_state(struct drm_i915_private *dev_priv);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);

/* intel_lpe_audio.c */
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
			    enum pipe pipe, enum port port,
			    const void *eld, int ls_clock, bool dp_output);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_i915_private *dev_priv);

/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
				  enum port port);

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
				       bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg);

u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);

static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
					 const i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })

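/*
 * Usage sketch (illustrative only): reading a 64-bit counter exposed as a
 * low/high register pair. The register names are hypothetical placeholders;
 * the macro above re-reads the upper half until it is stable, so a carry
 * between the two 32-bit reads is never observed as a torn value.
 *
 *	u64 ts = I915_READ64_2x32(EXAMPLE_CTR_LOW, EXAMPLE_CTR_HIGH);
 */
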
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

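/*
 * msecs_to_jiffies() rounds down, so add one jiffy to guarantee that we
 * sleep for at least the requested duration; the result is clamped so the
 * +1 cannot push us past MAX_JIFFY_OFFSET.
 */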
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}

static inline bool
__i915_request_irq_complete(const struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 seqno;

	/* Note that the engine may have wrapped around the seqno, and
	 * so our request->global_seqno will be ahead of the hardware,
	 * even though it completed the request before wrapping. We catch
	 * this by kicking all the waiters before resetting the seqno
	 * in hardware, and also signal the fence.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
		return true;

	/* The request was dequeued before we were awoken. We check after
	 * inspecting the hw to confirm that this was the same request
	 * that generated the HWS update. The memory barriers within
	 * the request execution are sufficient to ensure that a check
	 * after reading the value from hw matches this request.
	 */
	seqno = i915_gem_request_global_seqno(req);
	if (!seqno)
		return false;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (__i915_gem_request_completed(req, seqno))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		spin_lock_irq(&b->irq_lock);
		if (b->irq_wait && b->irq_wait->tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whomever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourself.
			 */
			wake_up_process(b->irq_wait->tsk);
		spin_unlock_irq(&b->irq_lock);

		if (__i915_gem_request_completed(req, seqno))
			return true;
	}

	return false;
}

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
 * perform the operation. To check beforehand, pass in the parameters to
 * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
 * you only need to pass in the minor offsets; page-aligned pointers are
 * always valid.
 *
 * For just checking for SSE4.1, in the foreknowledge that the future use
 * will be correctly aligned, just use i915_has_memcpy_from_wc().
 */
#define i915_can_memcpy_from_wc(dst, src, len) \
	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)

#define i915_has_memcpy_from_wc() \
	i915_memcpy_from_wc(NULL, NULL, 0)

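/*
 * Usage sketch (illustrative only): check up front whether the accelerated
 * WC read can service a given copy, and fall back to a plain memcpy()
 * otherwise.
 *
 *	if (i915_can_memcpy_from_wc(dst, src, len))
 *		i915_memcpy_from_wc(dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */
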
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 10)
		return CNL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

#endif