/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

47
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +030048static const u32 hpd_ilk[HPD_NUM_PINS] = {
49 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
50};
51
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +030052static const u32 hpd_ivb[HPD_NUM_PINS] = {
53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54};
55
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +030056static const u32 hpd_bdw[HPD_NUM_PINS] = {
57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58};
59
Ville Syrjälä7c7e10d2015-01-09 14:21:12 +020060static const u32 hpd_ibx[HPD_NUM_PINS] = {
Egbert Eiche5868a32013-02-28 04:17:12 -050061 [HPD_CRT] = SDE_CRT_HOTPLUG,
62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
66};
67
Ville Syrjälä7c7e10d2015-01-09 14:21:12 +020068static const u32 hpd_cpt[HPD_NUM_PINS] = {
Egbert Eiche5868a32013-02-28 04:17:12 -050069 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
Daniel Vetter73c352a2013-03-26 22:38:43 +010070 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
Egbert Eiche5868a32013-02-28 04:17:12 -050071 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74};
75
Xiong Zhang26951ca2015-08-17 15:55:50 +080076static const u32 hpd_spt[HPD_NUM_PINS] = {
Ville Syrjälä74c0b392015-08-27 23:56:07 +030077 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
Xiong Zhang26951ca2015-08-17 15:55:50 +080078 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82};
83
Ville Syrjälä7c7e10d2015-01-09 14:21:12 +020084static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
Egbert Eiche5868a32013-02-28 04:17:12 -050085 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91};
92
Ville Syrjälä7c7e10d2015-01-09 14:21:12 +020093static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
Egbert Eiche5868a32013-02-28 04:17:12 -050094 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100};
101
Ville Syrjälä4bca26d2015-05-11 20:49:10 +0300102static const u32 hpd_status_i915[HPD_NUM_PINS] = {
Egbert Eiche5868a32013-02-28 04:17:12 -0500103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109};
110
Shashank Sharmae0a20ad2015-03-27 14:54:14 +0200111/* BXT hpd list */
112static const u32 hpd_bxt[HPD_NUM_PINS] = {
Sonika Jindal7f3561b2015-08-10 10:35:35 +0530113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
Shashank Sharmae0a20ad2015-03-27 14:54:14 +0200114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116};
117
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

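/*
 * Illustrative use of the reset macro above (a sketch, not copied from a
 * real call site): resetting the ILK-style display engine interrupts is
 * simply
 *
 *	GEN5_IRQ_RESET(DE);
 *
 * which masks everything in DEIMR, zeroes DEIER, and then clears DEIIR
 * twice to flush both of the theoretically queued events.
 */
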
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

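/*
 * Illustrative use of the init macro above (parameters simplified from
 * the real postinstall code):
 *
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * i.e. first assert that GTIIR is clean, then program GTIER with the
 * interrupts to deliver and GTIMR with the ones to keep masked.
 */
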
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent the read-modify-write cycles
 * from interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking version is
 * also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

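/*
 * Usage sketch (values purely illustrative): enable just the CRT hotplug
 * detect bit while leaving the other HPD enable bits untouched with
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * and disable it again by passing 0 for @bits with the same @mask.
 */
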
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

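/*
 * The (interrupt_mask, enabled_irq_mask) convention above is shared by
 * all of the *_update_*_irq() helpers in this file: update(mask, mask)
 * unmasks the bits, update(mask, 0) masks them again. A sketch with a
 * made-up mask value (not a real call site):
 *
 *	ilk_update_display_irq(dev_priv, DE_GSE, DE_GSE);	enable
 *	ilk_update_display_irq(dev_priv, DE_GSE, 0);		disable
 */
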
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

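/*
 * Why these helpers exist: on GEN8+ the PM/RPS interrupts live in the
 * second GT interrupt register bank rather than the dedicated
 * GEN6_PM{IIR,IMR,IER} registers, so callers pick the right register
 * through these instead of hardcoding either layout.
 */
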
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* a barrier is missing here, but we don't really need one */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipes B and C the same bit is MBZ (must be zero).
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipes B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

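/*
 * Background for the status_mask << 16 convention used above and below:
 * in the PIPESTAT registers the enable bits sit in the high half (31:16)
 * directly above their status counterparts in the low half (15:0), so
 * for most events the enable mask is just the status mask shifted up.
 * The VLV/CHV special cases handled above are the bits where the two
 * halves don't line up one to one.
 */
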
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

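/*
 * A worked example of the cook-up above, with made-up numbers: for
 * htotal = 800 and vbl_start = 600 lines, vbl_start becomes 600 * 800 =
 * 480000 pixels, pulled back by (htotal - hsync_start) so it coincides
 * with the start of hsync. The hardware frame counter only increments at
 * the start of active, so whenever the sampled pixel counter is already
 * past vbl_start we add 1, yielding a counter that effectively
 * increments at the start of vblank instead.
 */
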
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq = NULL;
	struct intel_wait *wait;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/* We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno) &&
		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			      &wait->request->fence.flags))
			rq = i915_gem_request_get(wait->request);

		wake_up_process(wait->tsk);
	} else {
		__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		i915_gem_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}

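/*
 * rps.ei holds the C0 residency snapshot taken at the start of the
 * current evaluation interval; vlv_wa_c0_ei() below compares a fresh
 * snapshot against it to decide whether an up- or down-clock event
 * should be synthesized. Zeroing it (note ktime == 0) makes the next
 * invocation skip the comparison and merely re-arm the snapshot.
 */
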
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * dev_priv->rps.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * dev_priv->rps.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	dev_priv->rps.ei = now;
	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled) {
		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
		client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= dev_priv->rps.max_freq_softlimit)
			adj = 0;
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		dev_priv->rps.last_adj = 0;
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

Ben Widawskye3689192012-05-25 16:56:22 -07001190
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurs.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(&dev_priv->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

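/*
 * Top half for the L3 parity error interrupt: mask further parity
 * interrupts, record which slice(s) signalled the error, and kick
 * ivybridge_parity_work() to do the uevent reporting.
 */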
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
                                               u32 iir)
{
        if (!HAS_L3_DPF(dev_priv))
                return;

        spin_lock(&dev_priv->irq_lock);
        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev_priv);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(dev_priv->engine[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(dev_priv->engine[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev_priv->engine[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev_priv->engine[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(dev_priv))
                ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

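/*
 * Per-engine command streamer interrupt bits for GEN8+: a context switch
 * event marks the engine's execlist state for the irq tasklet, a user
 * interrupt wakes up request waiters, and the tasklet is kicked if either
 * needs servicing (or if GuC submission wants the user interrupt too).
 */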
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
        bool tasklet = false;

        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
                if (port_count(&engine->execlist_port[0])) {
                        __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
                        tasklet = true;
                }
        }

        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
                notify_ring(engine);
                tasklet |= i915.enable_guc_submission;
        }

        if (tasklet)
                tasklet_hi_schedule(&engine->irq_tasklet);
}

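/*
 * Ack the GT interrupt identity registers selected by master_ctl: read each
 * IIR with the raw _FW accessors, write the handled bits back, and stash the
 * raw values in gt_iir[] for gen8_gt_irq_handler() to process after the
 * master interrupt has been re-enabled.
 */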
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
                                   u32 master_ctl,
                                   u32 gt_iir[4])
{
        irqreturn_t ret = IRQ_NONE;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
                if (gt_iir[0]) {
                        I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
                        ret = IRQ_HANDLED;
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }

        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
                gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
                if (gt_iir[1]) {
                        I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
                        ret = IRQ_HANDLED;
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }

        if (master_ctl & GEN8_GT_VECS_IRQ) {
                gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
                if (gt_iir[3]) {
                        I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
                        ret = IRQ_HANDLED;
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }

        if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
                gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
                if (gt_iir[2] & (dev_priv->pm_rps_events |
                                 dev_priv->pm_guc_events)) {
                        I915_WRITE_FW(GEN8_GT_IIR(2),
                                      gt_iir[2] & (dev_priv->pm_rps_events |
                                                   dev_priv->pm_guc_events));
                        ret = IRQ_HANDLED;
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }

        return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
                                u32 gt_iir[4])
{
        if (gt_iir[0]) {
                gen8_cs_irq_handler(dev_priv->engine[RCS],
                                    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
                gen8_cs_irq_handler(dev_priv->engine[BCS],
                                    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
        }

        if (gt_iir[1]) {
                gen8_cs_irq_handler(dev_priv->engine[VCS],
                                    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
                gen8_cs_irq_handler(dev_priv->engine[VCS2],
                                    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
        }

        if (gt_iir[3])
                gen8_cs_irq_handler(dev_priv->engine[VECS],
                                    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

        if (gt_iir[2] & dev_priv->pm_rps_events)
                gen6_rps_irq_handler(dev_priv, gt_iir[2]);

        if (gt_iir[2] & dev_priv->pm_guc_events)
                gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}

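/*
 * The *_port_hotplug_long_detect() helpers below decode a platform specific
 * hotplug register value and report whether the pulse on the given port was
 * a long one. They are passed to intel_get_hpd_pins() as the
 * long_pulse_detect() callback.
 */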
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_E:
                return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_A:
                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_B:
                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
        case PORT_C:
                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
        case PORT_D:
                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
        default:
                return false;
        }
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
                               u32 hotplug_trigger, u32 dig_hotplug_reg,
                               const u32 hpd[HPD_NUM_PINS],
                               bool long_pulse_detect(enum port port, u32 val))
{
        enum port port;
        int i;

        for_each_hpd_pin(i) {
                if ((hpd[i] & hotplug_trigger) == 0)
                        continue;

                *pin_mask |= BIT(i);

                if (!intel_hpd_pin_to_port(i, &port))
                        continue;

                if (long_pulse_detect(port, dig_hotplug_reg))
                        *long_mask |= BIT(i);
        }

        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
                         hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

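/*
 * GMBUS and DP AUX completion interrupts share a wait queue: both handlers
 * simply wake everyone sleeping on dev_priv->gmbus_wait_queue and let the
 * waiters recheck their own status bits.
 */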
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                         enum pipe pipe,
                                         uint32_t crc0, uint32_t crc1,
                                         uint32_t crc2, uint32_t crc3,
                                         uint32_t crc4)
{
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        struct drm_driver *driver = dev_priv->drm.driver;
        uint32_t crcs[5];
        int head, tail;

        spin_lock(&pipe_crc->lock);
        if (pipe_crc->source) {
                if (!pipe_crc->entries) {
                        spin_unlock(&pipe_crc->lock);
                        DRM_DEBUG_KMS("spurious interrupt\n");
                        return;
                }

                head = pipe_crc->head;
                tail = pipe_crc->tail;

                if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
                        spin_unlock(&pipe_crc->lock);
                        DRM_ERROR("CRC buffer overflowing\n");
                        return;
                }

                entry = &pipe_crc->entries[head];

                entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
                entry->crc[0] = crc0;
                entry->crc[1] = crc1;
                entry->crc[2] = crc2;
                entry->crc[3] = crc3;
                entry->crc[4] = crc4;

                head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
                pipe_crc->head = head;

                spin_unlock(&pipe_crc->lock);

                wake_up_interruptible(&pipe_crc->wq);
        } else {
                /*
                 * For some not yet identified reason, the first CRC is
                 * bonkers. So let's just wait for the next vblank and read
                 * out the buggy result.
                 *
                 * On CHV sometimes the second CRC is bonkers as well, so
                 * don't trust that one either.
                 */
                if (pipe_crc->skipped == 0 ||
                    (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
                        pipe_crc->skipped++;
                        spin_unlock(&pipe_crc->lock);
                        return;
                }
                spin_unlock(&pipe_crc->lock);
                crcs[0] = crc0;
                crcs[1] = crc1;
                crcs[2] = crc2;
                crcs[3] = crc3;
                crcs[4] = crc4;
                drm_crtc_add_crc_entry(&crtc->base, true,
                                       drm_accurate_vblank_count(&crtc->base),
                                       crcs);
        }
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                             enum pipe pipe,
                             uint32_t crc0, uint32_t crc1,
                             uint32_t crc2, uint32_t crc3,
                             uint32_t crc4) {}
#endif

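/*
 * Per-platform wrappers that collect the CRC result registers and feed them
 * to display_pipe_crc_irq_handler(): HSW-style parts expose a single result
 * register, IVB exposes five, and the i9xx path reports red/green/blue plus
 * up to two extra result registers depending on generation.
 */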
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        uint32_t res1, res2;

        if (INTEL_GEN(dev_priv) >= 3)
                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
                gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                if (dev_priv->rps.interrupts_enabled) {
                        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
                        schedule_work(&dev_priv->rps.work);
                }
                spin_unlock(&dev_priv->irq_lock);
        }

        if (INTEL_INFO(dev_priv)->gen >= 8)
                return;

        if (HAS_VEBOX(dev_priv)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[VECS]);

                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
                        DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
        }
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
        if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
                /* Sample the log buffer flush related bits & clear them out
                 * immediately from the message identity register to minimize
                 * the probability of losing a flush interrupt when there are
                 * back to back flush interrupts.
                 * There can be a new flush interrupt for a different log
                 * buffer type (like for ISR) whilst the host is handling one
                 * (for DPC). Since the same bit is used in the message
                 * register for ISR & DPC, it could happen that the GuC sets
                 * the bit for the 2nd interrupt but the host clears out the
                 * bit on handling the 1st interrupt.
                 */
                u32 msg, flush;

                msg = I915_READ(SOFT_SCRATCH(15));
                flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
                               INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
                if (flush) {
                        /* Clear the message bits that are handled */
                        I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

                        /* Handle flush interrupt in bottom half */
                        queue_work(dev_priv->guc.log.runtime.flush_wq,
                                   &dev_priv->guc.log.runtime.flush_work);

                        dev_priv->guc.log.flush_interrupt_count++;
                } else {
                        /* Leaving the unhandled event bits uncleared will not
                         * cause the interrupt to be re-triggered.
                         */
                }
        }
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        bool ret;

        ret = drm_handle_vblank(&dev_priv->drm, pipe);
        if (ret)
                intel_finish_page_flip_mmio(dev_priv, pipe);

        return ret;
}

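/*
 * Read and clear the PIPESTAT registers for each pipe whose event bit is
 * set in iir, latching only the status bits we actually handle (plus FIFO
 * underrun status, which is filtered later) into pipe_stats[].
 */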
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
                                        u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        int pipe;

        spin_lock(&dev_priv->irq_lock);

        if (!dev_priv->display_irqs_enabled) {
                spin_unlock(&dev_priv->irq_lock);
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                i915_reg_t reg;
                u32 mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */

                /* fifo underruns are filtered in the underrun handler. */
                mask = PIPE_FIFO_UNDERRUN_STATUS;

                switch (pipe) {
                case PIPE_A:
                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                        break;
                case PIPE_B:
                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                        break;
                case PIPE_C:
                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
                        break;
                }
                if (iir & iir_bit)
                        mask |= dev_priv->pipestat_irq_mask[pipe];

                if (!mask)
                        continue;

                reg = PIPESTAT(pipe);
                mask |= PIPESTAT_INT_ENABLE_MASK;
                pipe_stats[pipe] = I915_READ(reg) & mask;

                /*
                 * Clear the PIPE*STAT regs before the IIR
                 */
                if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
                                        PIPESTAT_INT_STATUS_MASK))
                        I915_WRITE(reg, pipe_stats[pipe]);
        }
        spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
                                            u32 pipe_stats[I915_MAX_PIPES])
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                    intel_pipe_handle_vblank(dev_priv, pipe))
                        intel_check_page_flip(dev_priv, pipe);

                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
                        intel_finish_page_flip_cs(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }

        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

        if (hotplug_status)
                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

        return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
                                 u32 hotplug_status)
{
        u32 pin_mask = 0, long_mask = 0;

        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

                if (hotplug_trigger) {
                        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                                           hotplug_trigger, hpd_status_g4x,
                                           i9xx_port_hotplug_long_detect);

                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
                }

                if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                        dp_aux_irq_handler(dev_priv);
        } else {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                if (hotplug_trigger) {
                        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                                           hotplug_trigger, hpd_status_i915,
                                           i9xx_port_hotplug_long_detect);
                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
                }
        }
}

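/*
 * Top-level interrupt handler for VLV: with the master interrupt disabled,
 * ack the GT, PM and display IIR bits, then run the individual handlers once
 * interrupt generation has been re-enabled (see the theory-of-operation
 * comment in the body).
 */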
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 iir, gt_iir, pm_iir;
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 hotplug_status = 0;
                u32 ier = 0;

                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);
                iir = I915_READ(VLV_IIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /*
                 * Theory on interrupt generation, based on empirical evidence:
                 *
                 * x = ((VLV_IIR & VLV_IER) ||
                 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
                 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
                 *
                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
                 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
                 * guarantee the CPU interrupt will be raised again even if we
                 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
                 * bits this time around.
                 */
                I915_WRITE(VLV_MASTER_IER, 0);
                ier = I915_READ(VLV_IER);
                I915_WRITE(VLV_IER, 0);

                if (gt_iir)
                        I915_WRITE(GTIIR, gt_iir);
                if (pm_iir)
                        I915_WRITE(GEN6_PMIIR, pm_iir);

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
                           I915_LPE_PIPE_B_INTERRUPT))
                        intel_lpe_audio_irq_handler(dev_priv);

                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
                 */
                if (iir)
                        I915_WRITE(VLV_IIR, iir);

                I915_WRITE(VLV_IER, ier);
                I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
                POSTING_READ(VLV_MASTER_IER);

                if (gt_iir)
                        snb_gt_irq_handler(dev_priv, gt_iir);
                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

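/*
 * CHV variant of the VLV handler above: display events still come in via
 * VLV_IIR, but GT/PM interrupts are routed through the GEN8 master
 * interrupt control and the per-class GT IIR registers.
 */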
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 master_ctl, iir;
                u32 gt_iir[4] = {};
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 hotplug_status = 0;
                u32 ier = 0;

                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);

                if (master_ctl == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /*
                 * Theory on interrupt generation, based on empirical evidence:
                 *
                 * x = ((VLV_IIR & VLV_IER) ||
                 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
                 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
                 *
                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
                 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
                 * guarantee the CPU interrupt will be raised again even if we
                 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
                 * bits this time around.
                 */
                I915_WRITE(GEN8_MASTER_IRQ, 0);
                ier = I915_READ(VLV_IER);
                I915_WRITE(VLV_IER, 0);

                gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
                           I915_LPE_PIPE_B_INTERRUPT |
                           I915_LPE_PIPE_C_INTERRUPT))
                        intel_lpe_audio_irq_handler(dev_priv);

                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
                 */
                if (iir)
                        I915_WRITE(VLV_IIR, iir);

                I915_WRITE(VLV_IER, ier);
                I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);

                gen8_gt_irq_handler(dev_priv, gt_iir);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

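/*
 * Common SDE hotplug handler for IBX/CPT style PCHs. Note that the hotplug
 * register is read and written back even when no trigger bits are set; see
 * the comment in the body for the ack quirk that requires this.
 */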
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
                                u32 hotplug_trigger,
                                const u32 hpd[HPD_NUM_PINS])
{
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

        /*
         * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
         * unless we touch the hotplug register, even if hotplug_trigger is
         * zero. Not acking leads to "The master control interrupt lied (SDE)!"
         * errors.
         */
        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
        if (!hotplug_trigger) {
                u32 mask = PORTA_HOTPLUG_STATUS_MASK |
                           PORTD_HOTPLUG_STATUS_MASK |
                           PORTC_HOTPLUG_STATUS_MASK |
                           PORTB_HOTPLUG_STATUS_MASK;
                dig_hotplug_reg &= ~mask;
        }

        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
        if (!hotplug_trigger)
                return;

        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                           dig_hotplug_reg, hpd,
                           pch_port_hotplug_long_detect);

        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

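/*
 * South display engine interrupt handler for IBX: hotplug, AUX, GMBUS,
 * audio power changes, FDI errors and transcoder FIFO underruns.
 */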
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
                               SDE_AUDIO_POWER_SHIFT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev_priv);

        if (pch_iir & SDE_GMBUS)
                gmbus_irq_handler(dev_priv);

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(dev_priv, pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

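/*
 * GEN7_ERR_INT handler: poison reporting, per-pipe FIFO underruns, and
 * dispatch of CRC-done events to the IVB or HSW CRC handler.
 */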
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
        u32 err_int = I915_READ(GEN7_ERR_INT);
        enum pipe pipe;

        if (err_int & ERR_INT_POISON)
                DRM_ERROR("Poison interrupt\n");

        for_each_pipe(dev_priv, pipe) {
                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
                        if (IS_IVYBRIDGE(dev_priv))
                                ivb_pipe_crc_irq_handler(dev_priv, pipe);
                        else
                                hsw_pipe_crc_irq_handler(dev_priv, pipe);
                }
        }

        I915_WRITE(GEN7_ERR_INT, err_int);
}

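/*
 * South error interrupt handler for CPT/PPT: PCH poison and per-transcoder
 * FIFO underrun reporting via SERR_INT.
 */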
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
        u32 serr_int = I915_READ(SERR_INT);

        if (serr_int & SERR_INT_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

        I915_WRITE(SERR_INT, serr_int);
}

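/*
 * South display engine interrupt handler for CPT/PPT; mostly parallels
 * ibx_irq_handler() but uses the CPT bit layout and forwards SERR events
 * to cpt_serr_int_handler().
 */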
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                               SDE_AUDIO_POWER_SHIFT_CPT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK_CPT)
                dp_aux_irq_handler(dev_priv);

        if (pch_iir & SDE_GMBUS_CPT)
                gmbus_irq_handler(dev_priv);

        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

        if (pch_iir & SDE_FDI_MASK_CPT)
                for_each_pipe(dev_priv, pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & SDE_ERROR_CPT)
                cpt_serr_int_handler(dev_priv);
}

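/*
 * SPT+ south hotplug handler: the triggers live in two registers
 * (PCH_PORT_HOTPLUG and PCH_PORT_HOTPLUG2), so intel_get_hpd_pins() is
 * called once per register to accumulate pin_mask/long_mask before a
 * single intel_hpd_irq_handler() dispatch.
 */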
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
                ~SDE_PORTE_HOTPLUG_SPT;
        u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
        u32 pin_mask = 0, long_mask = 0;

        if (hotplug_trigger) {
                u32 dig_hotplug_reg;

                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
                I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

                intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                                   dig_hotplug_reg, hpd_spt,
                                   spt_port_hotplug_long_detect);
        }

        if (hotplug2_trigger) {
                u32 dig_hotplug_reg;

                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
                I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

                intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
                                   dig_hotplug_reg, hpd_spt,
                                   spt_port_hotplug2_long_detect);
        }

        if (pin_mask)
                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

        if (pch_iir & SDE_GMBUS_CPT)
                gmbus_irq_handler(dev_priv);
}

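/*
 * CPU-side digital port hotplug handler (DIGITAL_PORT_HOTPLUG_CNTRL), used
 * for the port A hotplug bits in the DE interrupt registers on ILK+.
 */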
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
                                u32 hotplug_trigger,
                                const u32 hpd[HPD_NUM_PINS])
{
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

        dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                           dig_hotplug_reg, hpd,
                           ilk_port_hotplug_long_detect);

        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

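/*
 * DE (display engine) interrupt handler for ILK/SNB: hotplug, AUX, opregion
 * ASLE, per-pipe vblank/underrun/CRC/flip-done events, plus forwarding of
 * south (PCH) and PCU events.
 */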
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002237static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2238 u32 de_iir)
Paulo Zanonic008bc62013-07-12 16:35:10 -03002239{
Daniel Vetter40da17c22013-10-21 18:04:36 +02002240 enum pipe pipe;
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03002241 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2242
Ville Syrjälä40e56412015-08-27 23:56:10 +03002243 if (hotplug_trigger)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002244 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002245
2246 if (de_iir & DE_AUX_CHANNEL_A)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002247 dp_aux_irq_handler(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002248
2249 if (de_iir & DE_GSE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002250 intel_opregion_asle_intr(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002251
Paulo Zanonic008bc62013-07-12 16:35:10 -03002252 if (de_iir & DE_POISON)
2253 DRM_ERROR("Poison interrupt\n");
2254
Damien Lespiau055e3932014-08-18 13:49:10 +01002255 for_each_pipe(dev_priv, pipe) {
Daniel Vetter5a21b662016-05-24 17:13:53 +02002256 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2257 intel_pipe_handle_vblank(dev_priv, pipe))
2258 intel_check_page_flip(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002259
Daniel Vetter40da17c22013-10-21 18:04:36 +02002260 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002261 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002262
Daniel Vetter40da17c22013-10-21 18:04:36 +02002263 if (de_iir & DE_PIPE_CRC_DONE(pipe))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002264 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02002265
Daniel Vetter40da17c22013-10-21 18:04:36 +02002266 /* plane/pipes map 1:1 on ilk+ */
Maarten Lankhorst5251f042016-05-17 15:07:47 +02002267 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
Maarten Lankhorst51cbaf02016-05-17 15:07:49 +02002268 intel_finish_page_flip_cs(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002269 }
2270
2271 /* check event from PCH */
2272 if (de_iir & DE_PCH_EVENT) {
2273 u32 pch_iir = I915_READ(SDEIIR);
2274
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002275 if (HAS_PCH_CPT(dev_priv))
2276 cpt_irq_handler(dev_priv, pch_iir);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002277 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002278 ibx_irq_handler(dev_priv, pch_iir);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002279
2280 /* should clear PCH hotplug event before clearing CPU irq */
2281 I915_WRITE(SDEIIR, pch_iir);
2282 }
2283
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002284 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2285 ironlake_rps_change_irq_handler(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002286}
2287
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002288static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2289 u32 de_iir)
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002290{
Damien Lespiau07d27e22014-03-03 17:31:46 +00002291 enum pipe pipe;
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03002292 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2293
Ville Syrjälä40e56412015-08-27 23:56:10 +03002294 if (hotplug_trigger)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002295 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002296
2297 if (de_iir & DE_ERR_INT_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002298 ivb_err_int_handler(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002299
2300 if (de_iir & DE_AUX_CHANNEL_A_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002301 dp_aux_irq_handler(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002302
2303 if (de_iir & DE_GSE_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002304 intel_opregion_asle_intr(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002305
Damien Lespiau055e3932014-08-18 13:49:10 +01002306 for_each_pipe(dev_priv, pipe) {
Daniel Vetter5a21b662016-05-24 17:13:53 +02002307 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2308 intel_pipe_handle_vblank(dev_priv, pipe))
2309 intel_check_page_flip(dev_priv, pipe);
Daniel Vetter40da17c22013-10-21 18:04:36 +02002310
2311 /* plane/pipes map 1:1 on ilk+ */
Maarten Lankhorst5251f042016-05-17 15:07:47 +02002312 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
Maarten Lankhorst51cbaf02016-05-17 15:07:49 +02002313 intel_finish_page_flip_cs(dev_priv, pipe);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002314 }
2315
2316 /* check event from PCH */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002317 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002318 u32 pch_iir = I915_READ(SDEIIR);
2319
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002320 cpt_irq_handler(dev_priv, pch_iir);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002321
2322 /* clear PCH hotplug event before clearing CPU irq */
2323 I915_WRITE(SDEIIR, pch_iir);
2324 }
2325}
2326
Oscar Mateo72c90f62014-06-16 16:10:57 +01002327/*
2328 * To handle irqs with the minimum potential races with fresh interrupts, we:
2329 * 1 - Disable Master Interrupt Control.
2330 * 2 - Find the source(s) of the interrupt.
2331 * 3 - Clear the Interrupt Identity bits (IIR).
2332 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2333 * 5 - Re-enable Master Interrupt Control.
2334 */
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002335static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002336{
Daniel Vetter45a83f82014-05-12 19:17:55 +02002337 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01002338 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002339 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01002340 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002341
Imre Deak2dd2a882015-02-24 11:14:30 +02002342 if (!intel_irqs_enabled(dev_priv))
2343 return IRQ_NONE;
2344
Imre Deak1f814da2015-12-16 02:52:19 +02002345 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2346 disable_rpm_wakeref_asserts(dev_priv);
2347
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002348 /* disable master interrupt before clearing iir */
2349 de_ier = I915_READ(DEIER);
2350 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03002351 POSTING_READ(DEIER);
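	/*
	 * The posting read flushes the posted write, so the master disable
	 * has actually reached hardware before we start sampling the IIRs.
	 */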
Chris Wilson0e434062012-05-09 21:45:44 +01002352
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002353 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2354 * interrupts will be stored on its back queue, and then we'll be
2355 * able to process them after we restore SDEIER (as soon as we restore
2356 * it, we'll get an interrupt if SDEIIR still has something to process
2357 * due to its back queue). */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002358 if (!HAS_PCH_NOP(dev_priv)) {
Ben Widawskyab5c6082013-04-05 13:12:41 -07002359 sde_ier = I915_READ(SDEIER);
2360 I915_WRITE(SDEIER, 0);
2361 POSTING_READ(SDEIER);
2362 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002363
Oscar Mateo72c90f62014-06-16 16:10:57 +01002364 /* Find, clear, then process each source of interrupt */
2365
Chris Wilson0e434062012-05-09 21:45:44 +01002366 gt_iir = I915_READ(GTIIR);
2367 if (gt_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002368 I915_WRITE(GTIIR, gt_iir);
2369 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002370 if (INTEL_GEN(dev_priv) >= 6)
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002371 snb_gt_irq_handler(dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03002372 else
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002373 ilk_gt_irq_handler(dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002374 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002375
2376 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01002377 if (de_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002378 I915_WRITE(DEIIR, de_iir);
2379 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002380 if (INTEL_GEN(dev_priv) >= 7)
2381 ivb_display_irq_handler(dev_priv, de_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002382 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002383 ilk_display_irq_handler(dev_priv, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002384 }
2385
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002386 if (INTEL_GEN(dev_priv) >= 6) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002387 u32 pm_iir = I915_READ(GEN6_PMIIR);
2388 if (pm_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002389 I915_WRITE(GEN6_PMIIR, pm_iir);
2390 ret = IRQ_HANDLED;
Oscar Mateo72c90f62014-06-16 16:10:57 +01002391 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002392 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002393 }
2394
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002395 I915_WRITE(DEIER, de_ier);
2396 POSTING_READ(DEIER);
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002397 if (!HAS_PCH_NOP(dev_priv)) {
Ben Widawskyab5c6082013-04-05 13:12:41 -07002398 I915_WRITE(SDEIER, sde_ier);
2399 POSTING_READ(SDEIER);
2400 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002401
Imre Deak1f814da2015-12-16 02:52:19 +02002402 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2403 enable_rpm_wakeref_asserts(dev_priv);
2404
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002405 return ret;
2406}
2407
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002408static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2409 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002410 const u32 hpd[HPD_NUM_PINS])
Shashank Sharmad04a4922014-08-22 17:40:41 +05302411{
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002412 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302413
Ville Syrjäläa52bb152015-08-27 23:56:11 +03002414 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2415 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302416
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002417 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002418 dig_hotplug_reg, hpd,
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002419 bxt_port_hotplug_long_detect);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002420
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002421 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302422}
2423
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002424static irqreturn_t
2425gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002426{
Ben Widawskyabd58f02013-11-02 21:07:09 -07002427 irqreturn_t ret = IRQ_NONE;
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002428 u32 iir;
Daniel Vetterc42664c2013-11-07 11:05:40 +01002429 enum pipe pipe;
Jesse Barnes88e04702014-11-13 17:51:48 +00002430
Ben Widawskyabd58f02013-11-02 21:07:09 -07002431 if (master_ctl & GEN8_DE_MISC_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002432 iir = I915_READ(GEN8_DE_MISC_IIR);
2433 if (iir) {
2434 I915_WRITE(GEN8_DE_MISC_IIR, iir);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002435 ret = IRQ_HANDLED;
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002436 if (iir & GEN8_DE_MISC_GSE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002437 intel_opregion_asle_intr(dev_priv);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002438 else
2439 DRM_ERROR("Unexpected DE Misc interrupt\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002440 } else
2442 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002443 }
2444
Daniel Vetter6d766f02013-11-07 14:49:55 +01002445 if (master_ctl & GEN8_DE_PORT_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002446 iir = I915_READ(GEN8_DE_PORT_IIR);
2447 if (iir) {
2448 u32 tmp_mask;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302449 bool found = false;
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002450
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002451 I915_WRITE(GEN8_DE_PORT_IIR, iir);
Daniel Vetter6d766f02013-11-07 14:49:55 +01002452 ret = IRQ_HANDLED;
Jesse Barnes88e04702014-11-13 17:51:48 +00002453
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002454 tmp_mask = GEN8_AUX_CHANNEL_A;
2455 if (INTEL_INFO(dev_priv)->gen >= 9)
2456 tmp_mask |= GEN9_AUX_CHANNEL_B |
2457 GEN9_AUX_CHANNEL_C |
2458 GEN9_AUX_CHANNEL_D;
2459
2460 if (iir & tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002461 dp_aux_irq_handler(dev_priv);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302462 found = true;
2463 }
2464
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002465 if (IS_GEN9_LP(dev_priv)) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002466 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2467 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002468 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2469 hpd_bxt);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002470 found = true;
2471 }
2472 } else if (IS_BROADWELL(dev_priv)) {
2473 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2474 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002475 ilk_hpd_irq_handler(dev_priv,
2476 tmp_mask, hpd_bdw);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002477 found = true;
2478 }
Shashank Sharmad04a4922014-08-22 17:40:41 +05302479 }
2480
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002481 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002482 gmbus_irq_handler(dev_priv);
Shashank Sharma9e637432014-08-22 17:40:43 +05302483 found = true;
2484 }
2485
Shashank Sharmad04a4922014-08-22 17:40:41 +05302486 if (!found)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002487 DRM_ERROR("Unexpected DE Port interrupt\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002488 } else
2490 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002491 }
2492
Damien Lespiau055e3932014-08-18 13:49:10 +01002493 for_each_pipe(dev_priv, pipe) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002494 u32 flip_done, fault_errors;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002495
Daniel Vetterc42664c2013-11-07 11:05:40 +01002496 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2497 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002498
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002499 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2500 if (!iir) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07002501 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002502 continue;
2503 }
2504
2505 ret = IRQ_HANDLED;
2506 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2507
Daniel Vetter5a21b662016-05-24 17:13:53 +02002508 if (iir & GEN8_PIPE_VBLANK &&
2509 intel_pipe_handle_vblank(dev_priv, pipe))
2510 intel_check_page_flip(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002511
2512 flip_done = iir;
2513 if (INTEL_INFO(dev_priv)->gen >= 9)
2514 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2515 else
2516 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2517
Maarten Lankhorst5251f042016-05-17 15:07:47 +02002518 if (flip_done)
Maarten Lankhorst51cbaf02016-05-17 15:07:49 +02002519 intel_finish_page_flip_cs(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002520
2521 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002522 hsw_pipe_crc_irq_handler(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002523
2524 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2525 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2526
2527 fault_errors = iir;
2528 if (INTEL_INFO(dev_priv)->gen >= 9)
2529 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2530 else
2531 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2532
2533 if (fault_errors)
Tvrtko Ursulin1353ec32016-10-27 13:48:32 +01002534 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002535 pipe_name(pipe),
2536 fault_errors);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002537 }
2538
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002539 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
Shashank Sharma266ea3d2014-08-22 17:40:42 +05302540 master_ctl & GEN8_DE_PCH_IRQ) {
Daniel Vetter92d03a82013-11-07 11:05:43 +01002541 /*
2542 * FIXME(BDW): Assume for now that the new interrupt handling
2543 * scheme also closed the SDE interrupt handling race we've seen
2544 * on older pch-split platforms. But this needs testing.
2545 */
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002546 iir = I915_READ(SDEIIR);
2547 if (iir) {
2548 I915_WRITE(SDEIIR, iir);
Daniel Vetter92d03a82013-11-07 11:05:43 +01002549 ret = IRQ_HANDLED;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002550
Rodrigo Vivi7b22b8c2017-06-02 13:06:39 -07002551 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
2552 HAS_PCH_CNP(dev_priv))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002553 spt_irq_handler(dev_priv, iir);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002554 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002555 cpt_irq_handler(dev_priv, iir);
Jani Nikula2dfb0b82016-01-07 10:29:10 +02002556 } else {
2557 /*
2558 * Like on previous PCH there seems to be something
2559 * fishy going on with forwarding PCH interrupts.
2560 */
2561 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2562 }
Daniel Vetter92d03a82013-11-07 11:05:43 +01002563 }
2564
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002565 return ret;
2566}
2567
2568static irqreturn_t gen8_irq_handler(int irq, void *arg)
2569{
2570 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01002571 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002572 u32 master_ctl;
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002573 u32 gt_iir[4] = {};
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002574 irqreturn_t ret;
2575
2576 if (!intel_irqs_enabled(dev_priv))
2577 return IRQ_NONE;
2578
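	/*
	 * The _FW accessors are raw MMIO without forcewake or uncore-lock
	 * bookkeeping, which keeps this hot IRQ path cheap.
	 */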
2579 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2580 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2581 if (!master_ctl)
2582 return IRQ_NONE;
2583
2584 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2585
2586 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2587 disable_rpm_wakeref_asserts(dev_priv);
2588
2589 /* Find, clear, then process each source of interrupt */
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002590 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2591 gen8_gt_irq_handler(dev_priv, gt_iir);
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002592 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2593
Chris Wilsoncb0d2052015-04-07 16:21:04 +01002594 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2595 POSTING_READ_FW(GEN8_MASTER_IRQ);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002596
Imre Deak1f814da2015-12-16 02:52:19 +02002597 enable_rpm_wakeref_asserts(dev_priv);
2598
Ben Widawskyabd58f02013-11-02 21:07:09 -07002599 return ret;
2600}
2601
Jesse Barnes8a905232009-07-11 16:48:03 -04002602/**
Chris Wilsond5367302017-06-20 10:57:43 +01002603 * i915_reset_device - do process context error handling work
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002604 * @dev_priv: i915 device private
Jesse Barnes8a905232009-07-11 16:48:03 -04002605 *
2606 * Fire an error uevent so userspace can see that a hang or error
2607 * was detected.
2608 */
Chris Wilsond5367302017-06-20 10:57:43 +01002609static void i915_reset_device(struct drm_i915_private *dev_priv)
Jesse Barnes8a905232009-07-11 16:48:03 -04002610{
Chris Wilson91c8a322016-07-05 10:40:23 +01002611 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
Ben Widawskycce723e2013-07-19 09:16:42 -07002612 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2613 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2614 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Jesse Barnes8a905232009-07-11 16:48:03 -04002615
Chris Wilsonc0336662016-05-06 15:40:21 +01002616 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002617
Chris Wilson8af29b02016-09-09 14:11:47 +01002618 DRM_DEBUG_DRIVER("resetting chip\n");
2619 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2620
Chris Wilson8af29b02016-09-09 14:11:47 +01002621 intel_prepare_reset(dev_priv);
Ville Syrjälä75147472014-11-24 18:28:11 +02002622
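	/*
	 * HANDOFF lets a thread that already holds struct_mutex perform the
	 * reset on our behalf; the loop below waits until whoever wins the
	 * lock has completed it.
	 */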
Chris Wilson8c185ec2017-03-16 17:13:02 +00002623 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
2624 wake_up_all(&dev_priv->gpu_error.wait_queue);
2625
Chris Wilson780f2622016-09-09 14:11:52 +01002626 do {
2627 /*
2628 * All state reset _must_ be completed before we update the
2629 * reset counter, for otherwise waiters might miss the reset
2630 * pending state and not properly drop locks, resulting in
2631 * deadlocks with the reset work.
2632 */
2633 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2634 i915_reset(dev_priv);
2635 mutex_unlock(&dev_priv->drm.struct_mutex);
2636 }
2637
2638 /* We need to wait for anyone holding the lock to wake up */
2639 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
Chris Wilson8c185ec2017-03-16 17:13:02 +00002640 I915_RESET_HANDOFF,
Chris Wilson780f2622016-09-09 14:11:52 +01002641 TASK_UNINTERRUPTIBLE,
2642 HZ));
Ville Syrjälä75147472014-11-24 18:28:11 +02002643
Chris Wilson8af29b02016-09-09 14:11:47 +01002644 intel_finish_reset(dev_priv);
Daniel Vetter17e1df02013-09-08 21:57:13 +02002645
Chris Wilson780f2622016-09-09 14:11:52 +01002646 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
Chris Wilson8af29b02016-09-09 14:11:47 +01002647 kobject_uevent_env(kobj,
2648 KOBJ_CHANGE, reset_done_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002649}
2650
Ben Widawskyd6369512016-09-20 16:54:32 +03002651static inline void
2652i915_err_print_instdone(struct drm_i915_private *dev_priv,
2653 struct intel_instdone *instdone)
2654{
Ben Widawskyf9e61372016-09-20 16:54:33 +03002655 int slice;
2656 int subslice;
2657
Ben Widawskyd6369512016-09-20 16:54:32 +03002658 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
2659
2660 if (INTEL_GEN(dev_priv) <= 3)
2661 return;
2662
2663 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
2664
2665 if (INTEL_GEN(dev_priv) <= 6)
2666 return;
2667
Ben Widawskyf9e61372016-09-20 16:54:33 +03002668 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2669 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
2670 slice, subslice, instdone->sampler[slice][subslice]);
2671
2672 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2673 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
2674 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03002675}
2676
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002677static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
Jesse Barnes8a905232009-07-11 16:48:03 -04002678{
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002679 u32 eir;
Jesse Barnes8a905232009-07-11 16:48:03 -04002680
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002681 if (!IS_GEN2(dev_priv))
2682 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
Jesse Barnes8a905232009-07-11 16:48:03 -04002683
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002684 if (INTEL_GEN(dev_priv) < 4)
2685 I915_WRITE(IPEIR, I915_READ(IPEIR));
2686 else
2687 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002688
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002689 I915_WRITE(EIR, I915_READ(EIR));
Jesse Barnes8a905232009-07-11 16:48:03 -04002690 eir = I915_READ(EIR);
2691 if (eir) {
2692 /*
2693 * some errors might have become stuck,
2694 * mask them.
2695 */
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002696 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002697 I915_WRITE(EMR, I915_READ(EMR) | eir);
2698 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2699 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002700}
2701
2702/**
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002703 * i915_handle_error - handle a gpu error
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002704 * @dev_priv: i915 device private
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00002705 * @engine_mask: mask representing engines that are hung
Michel Thierry87c390b2017-01-11 20:18:08 -08002706 * @fmt: Error message format string
2707 *
Javier Martinez Canillasaafd8582015-10-08 09:57:49 +02002708 * Do some basic checking of register state at error time and
Chris Wilson35aed2e2010-05-27 13:18:12 +01002709 * dump it to the syslog. Also call i915_capture_error_state() to make
2710 * sure we get a record and make it available in debugfs. Fire a uevent
2711 * so userspace knows something bad happened (should trigger collection
2712 * of a ring dump etc.).
2713 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002714void i915_handle_error(struct drm_i915_private *dev_priv,
2715 u32 engine_mask,
Mika Kuoppala58174462014-02-25 17:11:26 +02002716 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002717{
Mika Kuoppala58174462014-02-25 17:11:26 +02002718 va_list args;
2719 char error_msg[80];
Chris Wilson35aed2e2010-05-27 13:18:12 +01002720
Mika Kuoppala58174462014-02-25 17:11:26 +02002721 va_start(args, fmt);
2722 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2723 va_end(args);
2724
Chris Wilson1604a862017-03-14 17:18:40 +00002725 /*
2726 * In most cases it's guaranteed that we get here with an RPM
2727 * reference held, for example because there is a pending GPU
2728 * request that won't finish until the reset is done. This
2729 * isn't the case at least when we get here by doing a
2730 * simulated reset via debugfs, so get an RPM reference.
2731 */
2732 intel_runtime_pm_get(dev_priv);
2733
Chris Wilsonc0336662016-05-06 15:40:21 +01002734 i915_capture_error_state(dev_priv, engine_mask, error_msg);
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002735 i915_clear_error_registers(dev_priv);
Jesse Barnes8a905232009-07-11 16:48:03 -04002736
Chris Wilson8af29b02016-09-09 14:11:47 +01002737 if (!engine_mask)
Chris Wilson1604a862017-03-14 17:18:40 +00002738 goto out;
Ben Gamariba1234d2009-09-14 17:48:47 -04002739
Chris Wilsond5367302017-06-20 10:57:43 +01002740 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
2741 wait_event(dev_priv->gpu_error.reset_queue,
2742 !test_bit(I915_RESET_BACKOFF,
2743 &dev_priv->gpu_error.flags));
Chris Wilson1604a862017-03-14 17:18:40 +00002744 goto out;
Chris Wilsond5367302017-06-20 10:57:43 +01002745 }
Chris Wilson8af29b02016-09-09 14:11:47 +01002746
Chris Wilsond5367302017-06-20 10:57:43 +01002747 i915_reset_device(dev_priv);
2748
2749 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
2750 wake_up_all(&dev_priv->gpu_error.reset_queue);
Chris Wilson1604a862017-03-14 17:18:40 +00002751
2752out:
2753 intel_runtime_pm_put(dev_priv);
Jesse Barnes8a905232009-07-11 16:48:03 -04002754}
2755
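/*
 * Illustrative call site (a sketch, not code from this file; assumes the
 * intel_engine_flag() helper, i.e. BIT(engine->id)): hangcheck-style callers
 * pass the mask of hung engines plus a printf-style reason, e.g.
 *
 *	i915_handle_error(dev_priv, intel_engine_flag(engine),
 *			  "%s hung", engine->name);
 *
 * An engine_mask of 0 captures and clears the error state without resetting.
 */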
Keith Packard42f52ef2008-10-18 19:39:29 -07002756/* Called from drm generic code, passed 'crtc' which
2757 * we use as a pipe index
2758 */
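/*
 * Pre-ilk parts use PIPESTAT bits for this (plain vblank status vs. the
 * start-of-vblank bit); ilk+ toggles DEIMR bits and gen8+ per-pipe IER bits
 * instead.
 */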
Chris Wilson86e83e32016-10-07 20:49:52 +01002759static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002760{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002761 struct drm_i915_private *dev_priv = to_i915(dev);
Keith Packarde9d21d72008-10-16 11:31:38 -07002762 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002763
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002764 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson86e83e32016-10-07 20:49:52 +01002765 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2766 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2767
2768 return 0;
2769}
2770
2771static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2772{
2773 struct drm_i915_private *dev_priv = to_i915(dev);
2774 unsigned long irqflags;
2775
2776 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2777 i915_enable_pipestat(dev_priv, pipe,
2778 PIPE_START_VBLANK_INTERRUPT_STATUS);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002779 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002780
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002781 return 0;
2782}
2783
Thierry Reding88e72712015-09-24 18:35:31 +02002784static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002785{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002786 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002787 unsigned long irqflags;
Tvrtko Ursulin55b8f2a2016-10-14 09:17:22 +01002788 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
Chris Wilson86e83e32016-10-07 20:49:52 +01002789 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002790
Jesse Barnesf796cf82011-04-07 13:58:17 -07002791 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02002792 ilk_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002793 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2794
2795 return 0;
2796}
2797
Thierry Reding88e72712015-09-24 18:35:31 +02002798static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002799{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002800 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002801 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002802
Ben Widawskyabd58f02013-11-02 21:07:09 -07002803 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002804 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002805 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002806
Ben Widawskyabd58f02013-11-02 21:07:09 -07002807 return 0;
2808}
2809
Keith Packard42f52ef2008-10-18 19:39:29 -07002810/* Called from drm generic code, passed 'crtc' which
2811 * we use as a pipe index
2812 */
Chris Wilson86e83e32016-10-07 20:49:52 +01002813static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2814{
2815 struct drm_i915_private *dev_priv = to_i915(dev);
2816 unsigned long irqflags;
2817
2818 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2819 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2820 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2821}
2822
2823static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002824{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002825 struct drm_i915_private *dev_priv = to_i915(dev);
Keith Packarde9d21d72008-10-16 11:31:38 -07002826 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002827
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002828 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002829 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002830 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002831 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2832}
2833
Thierry Reding88e72712015-09-24 18:35:31 +02002834static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002835{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002836 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002837 unsigned long irqflags;
Tvrtko Ursulin55b8f2a2016-10-14 09:17:22 +01002838 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
Chris Wilson86e83e32016-10-07 20:49:52 +01002839 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002840
2841 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02002842 ilk_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002843 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2844}
2845
Thierry Reding88e72712015-09-24 18:35:31 +02002846static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002847{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002848 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002849 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002850
Ben Widawskyabd58f02013-11-02 21:07:09 -07002851 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002852 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002853 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2854}
2855
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002856static void ibx_irq_reset(struct drm_i915_private *dev_priv)
Paulo Zanoni91738a92013-06-05 14:21:51 -03002857{
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01002858 if (HAS_PCH_NOP(dev_priv))
Paulo Zanoni91738a92013-06-05 14:21:51 -03002859 return;
2860
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002861 GEN5_IRQ_RESET(SDE);
Paulo Zanoni105b1222014-04-01 15:37:17 -03002862
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01002863 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
Paulo Zanoni105b1222014-04-01 15:37:17 -03002864 I915_WRITE(SERR_INT, 0xffffffff);
Paulo Zanoni622364b2014-04-01 15:37:22 -03002865}
Paulo Zanoni105b1222014-04-01 15:37:17 -03002866
Paulo Zanoni622364b2014-04-01 15:37:22 -03002867/*
2868 * SDEIER is also touched by the interrupt handler to work around missed PCH
2869 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2870 * instead we unconditionally enable all PCH interrupt sources here, but then
2871 * only unmask them as needed with SDEIMR.
2872 *
2873 * This function needs to be called before interrupts are enabled.
2874 */
2875static void ibx_irq_pre_postinstall(struct drm_device *dev)
2876{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002877 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanoni622364b2014-04-01 15:37:22 -03002878
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01002879 if (HAS_PCH_NOP(dev_priv))
Paulo Zanoni622364b2014-04-01 15:37:22 -03002880 return;
2881
2882 WARN_ON(I915_READ(SDEIER) != 0);
Paulo Zanoni91738a92013-06-05 14:21:51 -03002883 I915_WRITE(SDEIER, 0xffffffff);
2884 POSTING_READ(SDEIER);
2885}
2886
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002887static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002888{
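	/*
	 * GEN5_IRQ_RESET() masks everything via IMR, zeroes IER, and writes
	 * IIR twice, since IIR can latch a second pending event.
	 */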
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002889 GEN5_IRQ_RESET(GT);
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002890 if (INTEL_GEN(dev_priv) >= 6)
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002891 GEN5_IRQ_RESET(GEN6_PM);
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002892}
2893
Ville Syrjälä70591a42014-10-30 19:42:58 +02002894static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2895{
2896 enum pipe pipe;
2897
Ville Syrjälä71b8b412016-04-11 16:56:31 +03002898 if (IS_CHERRYVIEW(dev_priv))
2899 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2900 else
2901 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2902
Ville Syrjäläad22d102016-04-12 18:56:14 +03002903 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
Ville Syrjälä70591a42014-10-30 19:42:58 +02002904 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2905
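	/* PIPESTAT status bits are write-1-to-clear as well; dump them all. */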
Ville Syrjäläad22d102016-04-12 18:56:14 +03002906 for_each_pipe(dev_priv, pipe) {
2907 I915_WRITE(PIPESTAT(pipe),
2908 PIPE_FIFO_UNDERRUN_STATUS |
2909 PIPESTAT_INT_STATUS_MASK);
2910 dev_priv->pipestat_irq_mask[pipe] = 0;
2911 }
Ville Syrjälä70591a42014-10-30 19:42:58 +02002912
2913 GEN5_IRQ_RESET(VLV_);
Ville Syrjäläad22d102016-04-12 18:56:14 +03002914 dev_priv->irq_mask = ~0;
Ville Syrjälä70591a42014-10-30 19:42:58 +02002915}
2916
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002917static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2918{
2919 u32 pipestat_mask;
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002920 u32 enable_mask;
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002921 enum pipe pipe;
2922
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002923 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
2924 PIPE_CRC_DONE_INTERRUPT_STATUS;
2925
2926 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2927 for_each_pipe(dev_priv, pipe)
2928 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2929
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002930 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2931 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
Ville Syrjäläebf5f922017-04-27 19:02:22 +03002932 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2933 I915_LPE_PIPE_A_INTERRUPT |
2934 I915_LPE_PIPE_B_INTERRUPT;
2935
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002936 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläebf5f922017-04-27 19:02:22 +03002937 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2938 I915_LPE_PIPE_C_INTERRUPT;
Ville Syrjälä6b7eafc2016-04-11 16:56:29 +03002939
2940 WARN_ON(dev_priv->irq_mask != ~0);
2941
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002942 dev_priv->irq_mask = ~enable_mask;
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002943
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002944 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002945}
2946
2947/* drm_dma.h hooks */
2949static void ironlake_irq_reset(struct drm_device *dev)
2950{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002951 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002952
2953 I915_WRITE(HWSTAM, 0xffffffff);
2954
2955 GEN5_IRQ_RESET(DE);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01002956 if (IS_GEN7(dev_priv))
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002957 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2958
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002959 gen5_gt_irq_reset(dev_priv);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002960
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002961 ibx_irq_reset(dev_priv);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002962}
2963
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002964static void valleyview_irq_preinstall(struct drm_device *dev)
2965{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002966 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002967
Ville Syrjälä34c7b8a2016-04-13 21:19:48 +03002968 I915_WRITE(VLV_MASTER_IER, 0);
2969 POSTING_READ(VLV_MASTER_IER);
2970
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002971 gen5_gt_irq_reset(dev_priv);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002972
Ville Syrjäläad22d102016-04-12 18:56:14 +03002973 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03002974 if (dev_priv->display_irqs_enabled)
2975 vlv_display_irq_reset(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03002976 spin_unlock_irq(&dev_priv->irq_lock);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002977}
2978
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02002979static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2980{
2981 GEN8_IRQ_RESET_NDX(GT, 0);
2982 GEN8_IRQ_RESET_NDX(GT, 1);
2983 GEN8_IRQ_RESET_NDX(GT, 2);
2984 GEN8_IRQ_RESET_NDX(GT, 3);
2985}
2986
Paulo Zanoni823f6b32014-04-01 15:37:26 -03002987static void gen8_irq_reset(struct drm_device *dev)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002988{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002989 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002990 int pipe;
2991
Ben Widawskyabd58f02013-11-02 21:07:09 -07002992 I915_WRITE(GEN8_MASTER_IRQ, 0);
2993 POSTING_READ(GEN8_MASTER_IRQ);
2994
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02002995 gen8_gt_irq_reset(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002996
Damien Lespiau055e3932014-08-18 13:49:10 +01002997 for_each_pipe(dev_priv, pipe)
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002998 if (intel_display_power_is_enabled(dev_priv,
2999 POWER_DOMAIN_PIPE(pipe)))
Paulo Zanoni813bde42014-07-04 11:50:29 -03003000 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003001
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003002 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3003 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3004 GEN5_IRQ_RESET(GEN8_PCU_);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003005
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003006 if (HAS_PCH_SPLIT(dev_priv))
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00003007 ibx_irq_reset(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003008}
Ben Widawskyabd58f02013-11-02 21:07:09 -07003009
Damien Lespiau4c6c03b2015-03-06 18:50:48 +00003010void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3011 unsigned int pipe_mask)
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003012{
Paulo Zanoni1180e202014-10-07 18:02:52 -03003013 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003014 enum pipe pipe;
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003015
Daniel Vetter13321782014-09-15 14:55:29 +02003016 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003017 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3018 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3019 dev_priv->de_irq_mask[pipe],
3020 ~dev_priv->de_irq_mask[pipe] | extra_ier);
Daniel Vetter13321782014-09-15 14:55:29 +02003021 spin_unlock_irq(&dev_priv->irq_lock);
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003022}
3023
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003024void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3025 unsigned int pipe_mask)
3026{
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003027 enum pipe pipe;
3028
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003029 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003030 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3031 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003032 spin_unlock_irq(&dev_priv->irq_lock);
3033
3034 /* make sure we're done processing display irqs */
Chris Wilson91c8a322016-07-05 10:40:23 +01003035 synchronize_irq(dev_priv->drm.irq);
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003036}
3037
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003038static void cherryview_irq_preinstall(struct drm_device *dev)
3039{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003040 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003041
3042 I915_WRITE(GEN8_MASTER_IRQ, 0);
3043 POSTING_READ(GEN8_MASTER_IRQ);
3044
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02003045 gen8_gt_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003046
3047 GEN5_IRQ_RESET(GEN8_PCU_);
3048
Ville Syrjäläad22d102016-04-12 18:56:14 +03003049 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03003050 if (dev_priv->display_irqs_enabled)
3051 vlv_display_irq_reset(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003052 spin_unlock_irq(&dev_priv->irq_lock);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003053}
3054
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003055static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
Ville Syrjälä87a02102015-08-27 23:55:57 +03003056 const u32 hpd[HPD_NUM_PINS])
3057{
Ville Syrjälä87a02102015-08-27 23:55:57 +03003058 struct intel_encoder *encoder;
3059 u32 enabled_irqs = 0;
3060
Chris Wilson91c8a322016-07-05 10:40:23 +01003061 for_each_intel_encoder(&dev_priv->drm, encoder)
Ville Syrjälä87a02102015-08-27 23:55:57 +03003062 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3063 enabled_irqs |= hpd[encoder->hpd_pin];
3064
3065 return enabled_irqs;
3066}
3067
Imre Deak1a56b1a2017-01-27 11:39:21 +02003068static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3069{
3070 u32 hotplug;
3071
3072 /*
3073 * Enable digital hotplug on the PCH, and configure the DP short pulse
3074 * duration to 2ms (which is the minimum in the Display Port spec).
3075 * The pulse duration bits are reserved on LPT+.
3076 */
3077 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3078 hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3079 PORTC_PULSE_DURATION_MASK |
3080 PORTD_PULSE_DURATION_MASK);
3081 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3082 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3083 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3084 /*
3085 * When CPU and PCH are on the same package, port A
3086 * HPD must be enabled in both north and south.
3087 */
3088 if (HAS_PCH_LPT_LP(dev_priv))
3089 hotplug |= PORTA_HOTPLUG_ENABLE;
3090 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3091}
3092
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003093static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
Keith Packard7fe0b972011-09-19 13:31:02 -07003094{
Imre Deak1a56b1a2017-01-27 11:39:21 +02003095 u32 hotplug_irqs, enabled_irqs;
Keith Packard7fe0b972011-09-19 13:31:02 -07003096
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003097 if (HAS_PCH_IBX(dev_priv)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02003098 hotplug_irqs = SDE_HOTPLUG_MASK;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003099 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003100 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02003101 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003102 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003103 }
3104
Daniel Vetterfee884e2013-07-04 23:35:21 +02003105 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003106
Imre Deak1a56b1a2017-01-27 11:39:21 +02003107 ibx_hpd_detection_setup(dev_priv);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003108}
Xiong Zhang26951ca2015-08-17 15:55:50 +08003109
Imre Deak2a57d9c2017-01-27 11:39:18 +02003110static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3111{
3112 u32 hotplug;
3113
3114 /* Enable digital hotplug on the PCH */
3115 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3116 hotplug |= PORTA_HOTPLUG_ENABLE |
3117 PORTB_HOTPLUG_ENABLE |
3118 PORTC_HOTPLUG_ENABLE |
3119 PORTD_HOTPLUG_ENABLE;
3120 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3121
3122 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3123 hotplug |= PORTE_HOTPLUG_ENABLE;
3124 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3125}
3126
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003127static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003128{
Imre Deak2a57d9c2017-01-27 11:39:18 +02003129 u32 hotplug_irqs, enabled_irqs;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003130
3131 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003132 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003133
3134 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3135
Imre Deak2a57d9c2017-01-27 11:39:18 +02003136 spt_hpd_detection_setup(dev_priv);
Keith Packard7fe0b972011-09-19 13:31:02 -07003137}
3138
Imre Deak1a56b1a2017-01-27 11:39:21 +02003139static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3140{
3141 u32 hotplug;
3142
3143 /*
3144 * Enable digital hotplug on the CPU, and configure the DP short pulse
3145 * duration to 2ms (which is the minimum in the Display Port spec).
3146 * The pulse duration bits are reserved on HSW+.
3147 */
3148 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3149 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3150 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3151 DIGITAL_PORTA_PULSE_DURATION_2ms;
3152 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3153}
3154
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003155static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003156{
Imre Deak1a56b1a2017-01-27 11:39:21 +02003157 u32 hotplug_irqs, enabled_irqs;
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003158
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003159 if (INTEL_GEN(dev_priv) >= 8) {
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003160 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003161 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003162
3163 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003164 } else if (INTEL_GEN(dev_priv) >= 7) {
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03003165 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003166 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003167
3168 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03003169 } else {
3170 hotplug_irqs = DE_DP_A_HOTPLUG;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003171 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003172
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003173 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3174 }
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003175
Imre Deak1a56b1a2017-01-27 11:39:21 +02003176 ilk_hpd_detection_setup(dev_priv);
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003177
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003178 ibx_hpd_irq_setup(dev_priv);
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003179}
3180
Imre Deak2a57d9c2017-01-27 11:39:18 +02003181static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3182 u32 enabled_irqs)
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003183{
Imre Deak2a57d9c2017-01-27 11:39:18 +02003184 u32 hotplug;
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003185
Ville Syrjäläa52bb152015-08-27 23:56:11 +03003186 hotplug = I915_READ(PCH_PORT_HOTPLUG);
Imre Deak2a57d9c2017-01-27 11:39:18 +02003187 hotplug |= PORTA_HOTPLUG_ENABLE |
3188 PORTB_HOTPLUG_ENABLE |
3189 PORTC_HOTPLUG_ENABLE;
Shubhangi Shrivastavad252bf62016-03-31 16:11:47 +05303190
3191 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3192 hotplug, enabled_irqs);
3193 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3194
3195 /*
3196 * For BXT the invert bit has to be set based on the AOB design
3197 * for the HPD detection logic; update it based on the VBT fields.
3198 */
Shubhangi Shrivastavad252bf62016-03-31 16:11:47 +05303199 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3200 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3201 hotplug |= BXT_DDIA_HPD_INVERT;
3202 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3203 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3204 hotplug |= BXT_DDIB_HPD_INVERT;
3205 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3206 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3207 hotplug |= BXT_DDIC_HPD_INVERT;
3208
Ville Syrjäläa52bb152015-08-27 23:56:11 +03003209 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003210}
3211
Imre Deak2a57d9c2017-01-27 11:39:18 +02003212static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3213{
3214 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3215}
3216
3217static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3218{
3219 u32 hotplug_irqs, enabled_irqs;
3220
3221 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3222 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3223
3224 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3225
3226 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3227}
3228
Paulo Zanonid46da432013-02-08 17:35:15 -02003229static void ibx_irq_postinstall(struct drm_device *dev)
3230{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003231 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003232 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02003233
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003234 if (HAS_PCH_NOP(dev_priv))
Daniel Vetter692a04c2013-05-29 21:43:05 +02003235 return;
3236
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003237 if (HAS_PCH_IBX(dev_priv))
Daniel Vetter5c673b62014-03-07 20:34:46 +01003238 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
Paulo Zanoni105b1222014-04-01 15:37:17 -03003239 else
Daniel Vetter5c673b62014-03-07 20:34:46 +01003240 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
Paulo Zanoni86642812013-04-12 17:57:57 -03003241
Ville Syrjäläb51a2842015-09-18 20:03:41 +03003242 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
Paulo Zanonid46da432013-02-08 17:35:15 -02003243 I915_WRITE(SDEIMR, ~mask);
Imre Deak2a57d9c2017-01-27 11:39:18 +02003244
3245 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3246 HAS_PCH_LPT(dev_priv))
Imre Deak1a56b1a2017-01-27 11:39:21 +02003247 ibx_hpd_detection_setup(dev_priv);
Imre Deak2a57d9c2017-01-27 11:39:18 +02003248 else
3249 spt_hpd_detection_setup(dev_priv);
Paulo Zanonid46da432013-02-08 17:35:15 -02003250}
3251
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003252static void gen5_gt_irq_postinstall(struct drm_device *dev)
3253{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003254 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003255 u32 pm_irqs, gt_irqs;
3256
3257 pm_irqs = gt_irqs = 0;
3258
3259 dev_priv->gt_irq_mask = ~0;
Tvrtko Ursulin3c9192b2016-10-13 11:03:05 +01003260 if (HAS_L3_DPF(dev_priv)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003261 /* L3 parity interrupt is always unmasked. */
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01003262 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3263 gt_irqs |= GT_PARITY_ERROR(dev_priv);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003264 }
3265
3266 gt_irqs |= GT_RENDER_USER_INTERRUPT;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003267 if (IS_GEN5(dev_priv)) {
Chris Wilsonf8973c22016-07-01 17:23:21 +01003268 gt_irqs |= ILK_BSD_USER_INTERRUPT;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003269 } else {
3270 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3271 }
3272
Paulo Zanoni35079892014-04-01 15:37:15 -03003273 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003274
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00003275 if (INTEL_GEN(dev_priv) >= 6) {
Imre Deak78e68d32014-12-15 18:59:27 +02003276 /*
3277 * RPS interrupts will get enabled/disabled on demand when RPS
3278 * itself is enabled/disabled.
3279 */
Akash Goelf4e9af42016-10-12 21:54:30 +05303280 if (HAS_VEBOX(dev_priv)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003281 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
Akash Goelf4e9af42016-10-12 21:54:30 +05303282 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3283 }
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003284
Akash Goelf4e9af42016-10-12 21:54:30 +05303285 dev_priv->pm_imr = 0xffffffff;
3286 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003287 }
3288}
3289
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003290static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003291{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003292 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003293 u32 display_mask, extra_mask;
3294
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00003295 if (INTEL_GEN(dev_priv) >= 7) {
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003296 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3297 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3298 DE_PLANEB_FLIP_DONE_IVB |
Daniel Vetter5c673b62014-03-07 20:34:46 +01003299 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003300 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03003301 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3302 DE_DP_A_HOTPLUG_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003303 } else {
3304 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3305 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003306 DE_AUX_CHANNEL_A |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003307 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3308 DE_POISON);
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003309 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3310 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3311 DE_DP_A_HOTPLUG);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003312 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003313
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003314 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003315
Paulo Zanoni0c841212014-04-01 15:37:27 -03003316 I915_WRITE(HWSTAM, 0xeffe);
3317
Paulo Zanoni622364b2014-04-01 15:37:22 -03003318 ibx_irq_pre_postinstall(dev);
3319
Paulo Zanoni35079892014-04-01 15:37:15 -03003320 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003321
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003322 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003323
Imre Deak1a56b1a2017-01-27 11:39:21 +02003324 ilk_hpd_detection_setup(dev_priv);
3325
Paulo Zanonid46da432013-02-08 17:35:15 -02003326 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07003327
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01003328 if (IS_IRONLAKE_M(dev_priv)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02003329 /* Enable PCU event interrupts
3330 *
3331 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003332 * setup is guaranteed to run in single-threaded context. But we
3333 * need it to make the assert_spin_locked happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003334 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02003335 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetterd6207432014-09-15 14:55:27 +02003336 spin_unlock_irq(&dev_priv->irq_lock);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003337 }
3338
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003339 return 0;
3340}
3341
Imre Deakf8b79e52014-03-04 19:23:07 +02003342void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3343{
Chris Wilson67520412017-03-02 13:28:01 +00003344 lockdep_assert_held(&dev_priv->irq_lock);
Imre Deakf8b79e52014-03-04 19:23:07 +02003345
3346 if (dev_priv->display_irqs_enabled)
3347 return;
3348
3349 dev_priv->display_irqs_enabled = true;
3350
Ville Syrjäläd6c69802016-04-11 16:56:27 +03003351 if (intel_irqs_enabled(dev_priv)) {
3352 vlv_display_irq_reset(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003353 vlv_display_irq_postinstall(dev_priv);
Ville Syrjäläd6c69802016-04-11 16:56:27 +03003354 }
Imre Deakf8b79e52014-03-04 19:23:07 +02003355}
3356
3357void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3358{
Chris Wilson67520412017-03-02 13:28:01 +00003359 lockdep_assert_held(&dev_priv->irq_lock);
Imre Deakf8b79e52014-03-04 19:23:07 +02003360
3361 if (!dev_priv->display_irqs_enabled)
3362 return;
3363
3364 dev_priv->display_irqs_enabled = false;
3365
Imre Deak950eaba2014-09-08 15:21:09 +03003366 if (intel_irqs_enabled(dev_priv))
Ville Syrjäläad22d102016-04-12 18:56:14 +03003367 vlv_display_irq_reset(dev_priv);
Imre Deakf8b79e52014-03-04 19:23:07 +02003368}
3369
3371static int valleyview_irq_postinstall(struct drm_device *dev)
3372{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003373 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä0e6c9a92014-10-30 19:43:00 +02003374
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003375 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003376
Ville Syrjäläad22d102016-04-12 18:56:14 +03003377 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03003378 if (dev_priv->display_irqs_enabled)
3379 vlv_display_irq_postinstall(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003380 spin_unlock_irq(&dev_priv->irq_lock);
3381
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003382 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Ville Syrjälä34c7b8a2016-04-13 21:19:48 +03003383 POSTING_READ(VLV_MASTER_IER);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003384
3385 return 0;
3386}
3387
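/*
 * gen8 spreads the GT interrupts over four IIR banks: gt_interrupts[0]
 * is RCS/BCS, [1] is VCS1/VCS2, [2] is the PM/GuC bank (left fully
 * masked here and unmasked on demand by the RPS/GuC code) and [3] is
 * VECS.
 */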
Ben Widawskyabd58f02013-11-02 21:07:09 -07003388static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3389{
Ben Widawskyabd58f02013-11-02 21:07:09 -07003390 /* These are interrupts we'll toggle with the ring mask register */
3391 uint32_t gt_interrupts[] = {
3392 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003393 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003394 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3395 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
Ben Widawskyabd58f02013-11-02 21:07:09 -07003396 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003397 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3398 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3399 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
Ben Widawskyabd58f02013-11-02 21:07:09 -07003400 0,
Oscar Mateo73d477f2014-07-24 17:04:31 +01003401 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3402 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
Ben Widawskyabd58f02013-11-02 21:07:09 -07003403 };
3404
Tvrtko Ursulin98735732016-04-19 16:46:08 +01003405 if (HAS_L3_DPF(dev_priv))
3406 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3407
Akash Goelf4e9af42016-10-12 21:54:30 +05303408 dev_priv->pm_ier = 0x0;
3409 dev_priv->pm_imr = ~dev_priv->pm_ier;
Deepak S9a2d2d82014-08-22 08:32:40 +05303410 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3411 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
Imre Deak78e68d32014-12-15 18:59:27 +02003412 /*
3413 * RPS interrupts will get enabled/disabled on demand when RPS itself
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05303414	 * is enabled/disabled. The same will be the case for GuC interrupts.
Imre Deak78e68d32014-12-15 18:59:27 +02003415 */
Akash Goelf4e9af42016-10-12 21:54:30 +05303416 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
Deepak S9a2d2d82014-08-22 08:32:40 +05303417 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003418}
3419
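/*
 * DE side: gen9 adds plane1 flip-done and fault bits, and the GEN9 LP
 * parts route GMBUS and hotplug through the DE port register. Pipes
 * whose power domain is off are skipped; their irq registers are
 * presumably reprogrammed when the power well comes back up.
 */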
3420static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3421{
Damien Lespiau770de832014-03-20 20:45:01 +00003422 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3423 uint32_t de_pipe_enables;
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003424 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3425 u32 de_port_enables;
Ville Syrjälä11825b02016-05-19 12:14:43 +03003426 u32 de_misc_masked = GEN8_DE_MISC_GSE;
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003427 enum pipe pipe;
Damien Lespiau770de832014-03-20 20:45:01 +00003428
Rodrigo Vivib4834a52015-09-02 15:19:24 -07003429 if (INTEL_INFO(dev_priv)->gen >= 9) {
Damien Lespiau770de832014-03-20 20:45:01 +00003430 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3431 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003432 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3433 GEN9_AUX_CHANNEL_D;
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02003434 if (IS_GEN9_LP(dev_priv))
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003435 de_port_masked |= BXT_DE_PORT_GMBUS;
3436 } else {
Damien Lespiau770de832014-03-20 20:45:01 +00003437 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3438 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003439 }
Damien Lespiau770de832014-03-20 20:45:01 +00003440
3441 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3442 GEN8_PIPE_FIFO_UNDERRUN;
3443
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003444 de_port_enables = de_port_masked;
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02003445 if (IS_GEN9_LP(dev_priv))
Ville Syrjäläa52bb152015-08-27 23:56:11 +03003446 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3447 else if (IS_BROADWELL(dev_priv))
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003448 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3449
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003450 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3451 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3452 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003453
Damien Lespiau055e3932014-08-18 13:49:10 +01003454 for_each_pipe(dev_priv, pipe)
Daniel Vetterf458ebb2014-09-30 10:56:39 +02003455 if (intel_display_power_is_enabled(dev_priv,
Paulo Zanoni813bde42014-07-04 11:50:29 -03003456 POWER_DOMAIN_PIPE(pipe)))
3457 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3458 dev_priv->de_irq_mask[pipe],
3459 de_pipe_enables);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003460
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003461 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
Ville Syrjälä11825b02016-05-19 12:14:43 +03003462 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
Imre Deak2a57d9c2017-01-27 11:39:18 +02003463
3464 if (IS_GEN9_LP(dev_priv))
3465 bxt_hpd_detection_setup(dev_priv);
Imre Deak1a56b1a2017-01-27 11:39:21 +02003466 else if (IS_BROADWELL(dev_priv))
3467 ilk_hpd_detection_setup(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003468}
3469
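/*
 * On PCH-split systems the south display interrupts live in the PCH,
 * so the ibx hooks bracket the north-side GT/DE setup; parts without
 * a PCH (e.g. GEN9 LP) skip both calls.
 */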
3470static int gen8_irq_postinstall(struct drm_device *dev)
3471{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003472 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003473
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003474 if (HAS_PCH_SPLIT(dev_priv))
Shashank Sharma266ea3d2014-08-22 17:40:42 +05303475 ibx_irq_pre_postinstall(dev);
Paulo Zanoni622364b2014-04-01 15:37:22 -03003476
Ben Widawskyabd58f02013-11-02 21:07:09 -07003477 gen8_gt_irq_postinstall(dev_priv);
3478 gen8_de_irq_postinstall(dev_priv);
3479
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003480 if (HAS_PCH_SPLIT(dev_priv))
Shashank Sharma266ea3d2014-08-22 17:40:42 +05303481 ibx_irq_postinstall(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003482
Ville Syrjäläe5328c42016-04-13 21:19:47 +03003483 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003484 POSTING_READ(GEN8_MASTER_IRQ);
3485
3486 return 0;
3487}
3488
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003489static int cherryview_irq_postinstall(struct drm_device *dev)
3490{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003491 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003492
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003493 gen8_gt_irq_postinstall(dev_priv);
3494
Ville Syrjäläad22d102016-04-12 18:56:14 +03003495 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03003496 if (dev_priv->display_irqs_enabled)
3497 vlv_display_irq_postinstall(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003498 spin_unlock_irq(&dev_priv->irq_lock);
3499
Ville Syrjäläe5328c42016-04-13 21:19:47 +03003500 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003501 POSTING_READ(GEN8_MASTER_IRQ);
3502
3503 return 0;
3504}
3505
Ben Widawskyabd58f02013-11-02 21:07:09 -07003506static void gen8_irq_uninstall(struct drm_device *dev)
3507{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003508 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003509
3510 if (!dev_priv)
3511 return;
3512
Paulo Zanoni823f6b32014-04-01 15:37:26 -03003513 gen8_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003514}
3515
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003516static void valleyview_irq_uninstall(struct drm_device *dev)
3517{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003518 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003519
3520 if (!dev_priv)
3521 return;
3522
Imre Deak843d0e72014-04-14 20:24:23 +03003523 I915_WRITE(VLV_MASTER_IER, 0);
Ville Syrjälä34c7b8a2016-04-13 21:19:48 +03003524 POSTING_READ(VLV_MASTER_IER);
Imre Deak843d0e72014-04-14 20:24:23 +03003525
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00003526 gen5_gt_irq_reset(dev_priv);
Ville Syrjälä893fce82014-10-30 19:42:56 +02003527
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003528 I915_WRITE(HWSTAM, 0xffffffff);
Imre Deakf8b79e52014-03-04 19:23:07 +02003529
Ville Syrjäläad22d102016-04-12 18:56:14 +03003530 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03003531 if (dev_priv->display_irqs_enabled)
3532 vlv_display_irq_reset(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003533 spin_unlock_irq(&dev_priv->irq_lock);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003534}
3535
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003536static void cherryview_irq_uninstall(struct drm_device *dev)
3537{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003538 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003539
3540 if (!dev_priv)
3541 return;
3542
3543 I915_WRITE(GEN8_MASTER_IRQ, 0);
3544 POSTING_READ(GEN8_MASTER_IRQ);
3545
Ville Syrjäläa2c30fb2014-10-30 19:42:52 +02003546 gen8_gt_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003547
Ville Syrjäläa2c30fb2014-10-30 19:42:52 +02003548 GEN5_IRQ_RESET(GEN8_PCU_);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003549
Ville Syrjäläad22d102016-04-12 18:56:14 +03003550 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03003551 if (dev_priv->display_irqs_enabled)
3552 vlv_display_irq_reset(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003553 spin_unlock_irq(&dev_priv->irq_lock);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003554}
3555
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003556static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003557{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003558 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes46979952011-04-07 13:53:55 -07003559
3560 if (!dev_priv)
3561 return;
3562
Paulo Zanonibe30b292014-04-01 15:37:25 -03003563 ironlake_irq_reset(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003564}
3565
Chris Wilsonc2798b12012-04-22 21:13:57 +01003566static void i8xx_irq_preinstall(struct drm_device *dev)
3567{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003568 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003569 int pipe;
3570
Damien Lespiau055e3932014-08-18 13:49:10 +01003571 for_each_pipe(dev_priv, pipe)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003572 I915_WRITE(PIPESTAT(pipe), 0);
3573 I915_WRITE16(IMR, 0xffff);
3574 I915_WRITE16(IER, 0x0);
3575 POSTING_READ16(IER);
3576}
3577
3578static int i8xx_irq_postinstall(struct drm_device *dev)
3579{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003580 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003581
Chris Wilsonc2798b12012-04-22 21:13:57 +01003582 I915_WRITE16(EMR,
3583 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3584
3585 /* Unmask the interrupts that we always want on. */
3586 dev_priv->irq_mask =
3587 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3588 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3589 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
Daniel Vetter37ef01a2015-04-01 13:43:46 +02003590 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003591 I915_WRITE16(IMR, dev_priv->irq_mask);
3592
3593 I915_WRITE16(IER,
3594 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3595 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Chris Wilsonc2798b12012-04-22 21:13:57 +01003596 I915_USER_INTERRUPT);
3597 POSTING_READ16(IER);
3598
Daniel Vetter379ef822013-10-16 22:55:56 +02003599 /* Interrupt setup is already guaranteed to be single-threaded, this is
3600 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003601 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003602 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3603 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02003604 spin_unlock_irq(&dev_priv->irq_lock);
Daniel Vetter379ef822013-10-16 22:55:56 +02003605
Chris Wilsonc2798b12012-04-22 21:13:57 +01003606 return 0;
3607}
3608
Daniel Vetter5a21b662016-05-24 17:13:53 +02003609/*
3610 * Returns true when a page flip has completed.
3611 */
3612static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3613 int plane, int pipe, u32 iir)
3614{
3615 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3616
3617 if (!intel_pipe_handle_vblank(dev_priv, pipe))
3618 return false;
3619
3620 if ((iir & flip_pending) == 0)
3621 goto check_page_flip;
3622
3623 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3624	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3625 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3626 * the flip is completed (no longer pending). Since this doesn't raise
3627 * an interrupt per se, we watch for the change at vblank.
3628 */
3629 if (I915_READ16(ISR) & flip_pending)
3630 goto check_page_flip;
3631
3632 intel_finish_page_flip_cs(dev_priv, pipe);
3633 return true;
3634
3635check_page_flip:
3636 intel_check_page_flip(dev_priv, pipe);
3637 return false;
3638}
3639
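/*
 * The i8xx interrupt registers are 16 bits wide. The handler below
 * loops until IIR holds nothing but flip-pending bits, since new pipe
 * events can rearm IIR while the previous pass is being serviced (see
 * the non-MSI note inside the loop).
 */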
Daniel Vetterff1f5252012-10-02 15:10:55 +02003640static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003641{
Daniel Vetter45a83f82014-05-12 19:17:55 +02003642 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003643 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003644 u16 iir, new_iir;
3645 u32 pipe_stats[2];
Chris Wilsonc2798b12012-04-22 21:13:57 +01003646 int pipe;
3647 u16 flip_mask =
3648 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3649 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Imre Deak1f814da2015-12-16 02:52:19 +02003650 irqreturn_t ret;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003651
Imre Deak2dd2a882015-02-24 11:14:30 +02003652 if (!intel_irqs_enabled(dev_priv))
3653 return IRQ_NONE;
3654
Imre Deak1f814da2015-12-16 02:52:19 +02003655 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3656 disable_rpm_wakeref_asserts(dev_priv);
3657
3658 ret = IRQ_NONE;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003659 iir = I915_READ16(IIR);
3660 if (iir == 0)
Imre Deak1f814da2015-12-16 02:52:19 +02003661 goto out;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003662
3663 while (iir & ~flip_mask) {
3664 /* Can't rely on pipestat interrupt bit in iir as it might
3665 * have been cleared after the pipestat interrupt was received.
3666 * It doesn't set the bit in iir again, but it still produces
3667 * interrupts (for non-MSI).
3668 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02003669 spin_lock(&dev_priv->irq_lock);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003670 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Daniel Vetteraaecdf62014-11-04 15:52:22 +01003671 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003672
Damien Lespiau055e3932014-08-18 13:49:10 +01003673 for_each_pipe(dev_priv, pipe) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003674 i915_reg_t reg = PIPESTAT(pipe);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003675 pipe_stats[pipe] = I915_READ(reg);
3676
3677 /*
3678 * Clear the PIPE*STAT regs before the IIR
3679 */
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003680 if (pipe_stats[pipe] & 0x8000ffff)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003681 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003682 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02003683 spin_unlock(&dev_priv->irq_lock);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003684
3685 I915_WRITE16(IIR, iir & ~flip_mask);
3686 new_iir = I915_READ16(IIR); /* Flush posted writes */
3687
Chris Wilsonc2798b12012-04-22 21:13:57 +01003688 if (iir & I915_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05303689 notify_ring(dev_priv->engine[RCS]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003690
Damien Lespiau055e3932014-08-18 13:49:10 +01003691 for_each_pipe(dev_priv, pipe) {
Daniel Vetter5a21b662016-05-24 17:13:53 +02003692 int plane = pipe;
3693 if (HAS_FBC(dev_priv))
3694 plane = !plane;
3695
3696 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3697 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
3698 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003699
Daniel Vetter4356d582013-10-16 22:55:55 +02003700 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003701 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003702
Daniel Vetter1f7247c2014-09-30 10:56:48 +02003703 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3704 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3705 pipe);
Daniel Vetter4356d582013-10-16 22:55:55 +02003706 }
Chris Wilsonc2798b12012-04-22 21:13:57 +01003707
3708 iir = new_iir;
3709 }
Imre Deak1f814da2015-12-16 02:52:19 +02003710 ret = IRQ_HANDLED;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003711
Imre Deak1f814da2015-12-16 02:52:19 +02003712out:
3713 enable_rpm_wakeref_asserts(dev_priv);
3714
3715 return ret;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003716}
3717
3718static void i8xx_irq_uninstall(struct drm_device *dev)
3719{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003720 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003721 int pipe;
3722
Damien Lespiau055e3932014-08-18 13:49:10 +01003723 for_each_pipe(dev_priv, pipe) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003724 /* Clear enable bits; then clear status bits */
3725 I915_WRITE(PIPESTAT(pipe), 0);
3726 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3727 }
3728 I915_WRITE16(IMR, 0xffff);
3729 I915_WRITE16(IER, 0x0);
3730 I915_WRITE16(IIR, I915_READ16(IIR));
3731}
3732
Chris Wilsona266c7d2012-04-24 22:59:44 +01003733static void i915_irq_preinstall(struct drm_device *dev)
3734{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003735 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003736 int pipe;
3737
Tvrtko Ursulin56b857a2016-11-07 09:29:20 +00003738 if (I915_HAS_HOTPLUG(dev_priv)) {
Egbert Eich0706f172015-09-23 16:15:27 +02003739 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003740 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3741 }
3742
Chris Wilson00d98eb2012-04-24 22:59:48 +01003743 I915_WRITE16(HWSTAM, 0xeffe);
Damien Lespiau055e3932014-08-18 13:49:10 +01003744 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003745 I915_WRITE(PIPESTAT(pipe), 0);
3746 I915_WRITE(IMR, 0xffffffff);
3747 I915_WRITE(IER, 0x0);
3748 POSTING_READ(IER);
3749}
3750
3751static int i915_irq_postinstall(struct drm_device *dev)
3752{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003753 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson38bde182012-04-24 22:59:50 +01003754 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003755
Chris Wilson38bde182012-04-24 22:59:50 +01003756 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3757
3758 /* Unmask the interrupts that we always want on. */
3759 dev_priv->irq_mask =
3760 ~(I915_ASLE_INTERRUPT |
3761 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3762 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3763 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
Daniel Vetter37ef01a2015-04-01 13:43:46 +02003764 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilson38bde182012-04-24 22:59:50 +01003765
3766 enable_mask =
3767 I915_ASLE_INTERRUPT |
3768 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3769 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Chris Wilson38bde182012-04-24 22:59:50 +01003770 I915_USER_INTERRUPT;
3771
Tvrtko Ursulin56b857a2016-11-07 09:29:20 +00003772 if (I915_HAS_HOTPLUG(dev_priv)) {
Egbert Eich0706f172015-09-23 16:15:27 +02003773 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003774 POSTING_READ(PORT_HOTPLUG_EN);
3775
Chris Wilsona266c7d2012-04-24 22:59:44 +01003776 /* Enable in IER... */
3777 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3778 /* and unmask in IMR */
3779 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3780 }
3781
Chris Wilsona266c7d2012-04-24 22:59:44 +01003782 I915_WRITE(IMR, dev_priv->irq_mask);
3783 I915_WRITE(IER, enable_mask);
3784 POSTING_READ(IER);
3785
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003786 i915_enable_asle_pipestat(dev_priv);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003787
Daniel Vetter379ef822013-10-16 22:55:56 +02003788 /* Interrupt setup is already guaranteed to be single-threaded, this is
3789 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003790 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003791 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3792 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02003793 spin_unlock_irq(&dev_priv->irq_lock);
Daniel Vetter379ef822013-10-16 22:55:56 +02003794
Daniel Vetter20afbda2012-12-11 14:05:07 +01003795 return 0;
3796}
3797
Daniel Vetter5a21b662016-05-24 17:13:53 +02003798/*
3799 * Returns true when a page flip has completed.
3800 */
3801static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
3802 int plane, int pipe, u32 iir)
3803{
3804 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3805
3806 if (!intel_pipe_handle_vblank(dev_priv, pipe))
3807 return false;
3808
3809 if ((iir & flip_pending) == 0)
3810 goto check_page_flip;
3811
3812 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3813	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3814 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3815 * the flip is completed (no longer pending). Since this doesn't raise
3816 * an interrupt per se, we watch for the change at vblank.
3817 */
3818 if (I915_READ(ISR) & flip_pending)
3819 goto check_page_flip;
3820
3821 intel_finish_page_flip_cs(dev_priv, pipe);
3822 return true;
3823
3824check_page_flip:
3825 intel_check_page_flip(dev_priv, pipe);
3826 return false;
3827}
3828
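/*
 * gen3 variant of the same flow: 32 bit registers, optional hotplug
 * handling and the legacy BLC/ASLE backlight event on top of what the
 * i8xx handler does.
 */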
Daniel Vetterff1f5252012-10-02 15:10:55 +02003829static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003830{
Daniel Vetter45a83f82014-05-12 19:17:55 +02003831 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003832 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01003833 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilson38bde182012-04-24 22:59:50 +01003834 u32 flip_mask =
3835 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3836 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01003837 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003838
Imre Deak2dd2a882015-02-24 11:14:30 +02003839 if (!intel_irqs_enabled(dev_priv))
3840 return IRQ_NONE;
3841
Imre Deak1f814da2015-12-16 02:52:19 +02003842 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3843 disable_rpm_wakeref_asserts(dev_priv);
3844
Chris Wilsona266c7d2012-04-24 22:59:44 +01003845 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01003846 do {
3847 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01003848 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003849
3850 /* Can't rely on pipestat interrupt bit in iir as it might
3851 * have been cleared after the pipestat interrupt was received.
3852 * It doesn't set the bit in iir again, but it still produces
3853 * interrupts (for non-MSI).
3854 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02003855 spin_lock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003856 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Daniel Vetteraaecdf62014-11-04 15:52:22 +01003857 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003858
Damien Lespiau055e3932014-08-18 13:49:10 +01003859 for_each_pipe(dev_priv, pipe) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003860 i915_reg_t reg = PIPESTAT(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003861 pipe_stats[pipe] = I915_READ(reg);
3862
Chris Wilson38bde182012-04-24 22:59:50 +01003863 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003864 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003865 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01003866 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003867 }
3868 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02003869 spin_unlock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003870
3871 if (!irq_received)
3872 break;
3873
Chris Wilsona266c7d2012-04-24 22:59:44 +01003874 /* Consume port. Then clear IIR or we'll miss events */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003875 if (I915_HAS_HOTPLUG(dev_priv) &&
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03003876 iir & I915_DISPLAY_PORT_INTERRUPT) {
3877 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3878 if (hotplug_status)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003879 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03003880 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003881
Chris Wilson38bde182012-04-24 22:59:50 +01003882 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003883 new_iir = I915_READ(IIR); /* Flush posted writes */
3884
Chris Wilsona266c7d2012-04-24 22:59:44 +01003885 if (iir & I915_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05303886 notify_ring(dev_priv->engine[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003887
Damien Lespiau055e3932014-08-18 13:49:10 +01003888 for_each_pipe(dev_priv, pipe) {
Daniel Vetter5a21b662016-05-24 17:13:53 +02003889 int plane = pipe;
3890 if (HAS_FBC(dev_priv))
3891 plane = !plane;
3892
3893 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3894 i915_handle_vblank(dev_priv, plane, pipe, iir))
3895 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003896
3897 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3898 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003899
3900 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003901 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003902
Daniel Vetter1f7247c2014-09-30 10:56:48 +02003903 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3904 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3905 pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003906 }
3907
Chris Wilsona266c7d2012-04-24 22:59:44 +01003908 if (blc_event || (iir & I915_ASLE_INTERRUPT))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003909 intel_opregion_asle_intr(dev_priv);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003910
3911 /* With MSI, interrupts are only generated when iir
3912 * transitions from zero to nonzero. If another bit got
3913 * set while we were handling the existing iir bits, then
3914 * we would never get another interrupt.
3915 *
3916 * This is fine on non-MSI as well, as if we hit this path
3917 * we avoid exiting the interrupt handler only to generate
3918 * another one.
3919 *
3920 * Note that for MSI this could cause a stray interrupt report
3921 * if an interrupt landed in the time between writing IIR and
3922 * the posting read. This should be rare enough to never
3923 * trigger the 99% of 100,000 interrupts test for disabling
3924 * stray interrupts.
3925 */
Chris Wilson38bde182012-04-24 22:59:50 +01003926 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003927 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003928 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003929
Imre Deak1f814da2015-12-16 02:52:19 +02003930 enable_rpm_wakeref_asserts(dev_priv);
3931
Chris Wilsona266c7d2012-04-24 22:59:44 +01003932 return ret;
3933}
3934
3935static void i915_irq_uninstall(struct drm_device *dev)
3936{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003937 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003938 int pipe;
3939
Tvrtko Ursulin56b857a2016-11-07 09:29:20 +00003940 if (I915_HAS_HOTPLUG(dev_priv)) {
Egbert Eich0706f172015-09-23 16:15:27 +02003941 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003942 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3943 }
3944
Chris Wilson00d98eb2012-04-24 22:59:48 +01003945 I915_WRITE16(HWSTAM, 0xffff);
Damien Lespiau055e3932014-08-18 13:49:10 +01003946 for_each_pipe(dev_priv, pipe) {
Chris Wilson55b39752012-04-24 22:59:49 +01003947 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003948 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003949 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3950 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003951 I915_WRITE(IMR, 0xffffffff);
3952 I915_WRITE(IER, 0x0);
3953
Chris Wilsona266c7d2012-04-24 22:59:44 +01003954 I915_WRITE(IIR, I915_READ(IIR));
3955}
3956
3957static void i965_irq_preinstall(struct drm_device *dev)
3958{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003959 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003960 int pipe;
3961
Egbert Eich0706f172015-09-23 16:15:27 +02003962 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Chris Wilsonadca4732012-05-11 18:01:31 +01003963 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003964
3965 I915_WRITE(HWSTAM, 0xeffe);
Damien Lespiau055e3932014-08-18 13:49:10 +01003966 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003967 I915_WRITE(PIPESTAT(pipe), 0);
3968 I915_WRITE(IMR, 0xffffffff);
3969 I915_WRITE(IER, 0x0);
3970 POSTING_READ(IER);
3971}
3972
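/*
 * i965/g4x note: the flip-pending bits are unmasked in IMR, so they
 * show up in IIR/ISR for the vblank-time checks in i915_handle_vblank(),
 * but they are removed from IER and thus never raise an interrupt on
 * their own. g4x additionally exposes the BSD ring user interrupt.
 */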
3973static int i965_irq_postinstall(struct drm_device *dev)
3974{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003975 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003976 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003977 u32 error_mask;
3978
Chris Wilsona266c7d2012-04-24 22:59:44 +01003979 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003980 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01003981 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003982 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3983 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3984 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3985 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3986 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3987
3988 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003989 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3990 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003991 enable_mask |= I915_USER_INTERRUPT;
3992
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003993 if (IS_G4X(dev_priv))
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003994 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003995
Daniel Vetterb79480b2013-06-27 17:52:10 +02003996 /* Interrupt setup is already guaranteed to be single-threaded, this is
3997 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003998 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003999 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4000 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4001 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02004002 spin_unlock_irq(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004003
Chris Wilsona266c7d2012-04-24 22:59:44 +01004004 /*
4005 * Enable some error detection, note the instruction error mask
4006 * bit is reserved, so we leave it masked.
4007 */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004008 if (IS_G4X(dev_priv)) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004009 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4010 GM45_ERROR_MEM_PRIV |
4011 GM45_ERROR_CP_PRIV |
4012 I915_ERROR_MEMORY_REFRESH);
4013 } else {
4014 error_mask = ~(I915_ERROR_PAGE_TABLE |
4015 I915_ERROR_MEMORY_REFRESH);
4016 }
4017 I915_WRITE(EMR, error_mask);
4018
4019 I915_WRITE(IMR, dev_priv->irq_mask);
4020 I915_WRITE(IER, enable_mask);
4021 POSTING_READ(IER);
4022
Egbert Eich0706f172015-09-23 16:15:27 +02004023 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004024 POSTING_READ(PORT_HOTPLUG_EN);
4025
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004026 i915_enable_asle_pipestat(dev_priv);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004027
4028 return 0;
4029}
4030
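/*
 * Hotplug setup for the pre-PCH platforms: merge the per-pin enable
 * bits and rewrite the CRT detection fields of PORT_HOTPLUG_EN in one
 * _locked update (the caller already holds irq_lock).
 */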
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004031static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
Daniel Vetter20afbda2012-12-11 14:05:07 +01004032{
Daniel Vetter20afbda2012-12-11 14:05:07 +01004033 u32 hotplug_en;
4034
Chris Wilson67520412017-03-02 13:28:01 +00004035 lockdep_assert_held(&dev_priv->irq_lock);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02004036
Ville Syrjälä778eb332015-01-09 14:21:13 +02004037 /* Note HDMI and DP share hotplug bits */
4038 /* enable bits are the same for all generations */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004039 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
Ville Syrjälä778eb332015-01-09 14:21:13 +02004040	/* Programming the CRT detection parameters tends
 4041	 * to generate a spurious hotplug event about three
 4042	 * seconds later. So just do it once.
 4043	 */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004044 if (IS_G4X(dev_priv))
Ville Syrjälä778eb332015-01-09 14:21:13 +02004045 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
Ville Syrjälä778eb332015-01-09 14:21:13 +02004046 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004047
Ville Syrjälä778eb332015-01-09 14:21:13 +02004048 /* Ignore TV since it's buggy */
Egbert Eich0706f172015-09-23 16:15:27 +02004049 i915_hotplug_interrupt_update_locked(dev_priv,
Jani Nikulaf9e3dc72015-10-21 17:22:43 +03004050 HOTPLUG_INT_EN_MASK |
4051 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4052 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4053 hotplug_en);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004054}
4055
Daniel Vetterff1f5252012-10-02 15:10:55 +02004056static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004057{
Daniel Vetter45a83f82014-05-12 19:17:55 +02004058 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004059 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004060 u32 iir, new_iir;
4061 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01004062 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02004063 u32 flip_mask =
4064 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4065 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004066
Imre Deak2dd2a882015-02-24 11:14:30 +02004067 if (!intel_irqs_enabled(dev_priv))
4068 return IRQ_NONE;
4069
Imre Deak1f814da2015-12-16 02:52:19 +02004070 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4071 disable_rpm_wakeref_asserts(dev_priv);
4072
Chris Wilsona266c7d2012-04-24 22:59:44 +01004073 iir = I915_READ(IIR);
4074
Chris Wilsona266c7d2012-04-24 22:59:44 +01004075 for (;;) {
Ville Syrjälä501e01d2014-01-17 11:35:15 +02004076 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson2c8ba292012-04-24 22:59:46 +01004077 bool blc_event = false;
4078
Chris Wilsona266c7d2012-04-24 22:59:44 +01004079 /* Can't rely on pipestat interrupt bit in iir as it might
4080 * have been cleared after the pipestat interrupt was received.
4081 * It doesn't set the bit in iir again, but it still produces
4082 * interrupts (for non-MSI).
4083 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02004084 spin_lock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004085 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Daniel Vetteraaecdf62014-11-04 15:52:22 +01004086 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004087
Damien Lespiau055e3932014-08-18 13:49:10 +01004088 for_each_pipe(dev_priv, pipe) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004089 i915_reg_t reg = PIPESTAT(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004090 pipe_stats[pipe] = I915_READ(reg);
4091
4092 /*
4093 * Clear the PIPE*STAT regs before the IIR
4094 */
4095 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004096 I915_WRITE(reg, pipe_stats[pipe]);
Ville Syrjälä501e01d2014-01-17 11:35:15 +02004097 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004098 }
4099 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02004100 spin_unlock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004101
4102 if (!irq_received)
4103 break;
4104
4105 ret = IRQ_HANDLED;
4106
4107 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03004108 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4109 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4110 if (hotplug_status)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004111 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03004112 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01004113
Ville Syrjälä21ad8332013-02-19 15:16:39 +02004114 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004115 new_iir = I915_READ(IIR); /* Flush posted writes */
4116
Chris Wilsona266c7d2012-04-24 22:59:44 +01004117 if (iir & I915_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05304118 notify_ring(dev_priv->engine[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004119 if (iir & I915_BSD_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05304120 notify_ring(dev_priv->engine[VCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004121
Damien Lespiau055e3932014-08-18 13:49:10 +01004122 for_each_pipe(dev_priv, pipe) {
Daniel Vetter5a21b662016-05-24 17:13:53 +02004123 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4124 i915_handle_vblank(dev_priv, pipe, pipe, iir))
4125 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004126
4127 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4128 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02004129
4130 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004131 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004132
Daniel Vetter1f7247c2014-09-30 10:56:48 +02004133 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4134 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02004135 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01004136
4137 if (blc_event || (iir & I915_ASLE_INTERRUPT))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004138 intel_opregion_asle_intr(dev_priv);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004139
Daniel Vetter515ac2b2012-12-01 13:53:44 +01004140 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004141 gmbus_irq_handler(dev_priv);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01004142
Chris Wilsona266c7d2012-04-24 22:59:44 +01004143 /* With MSI, interrupts are only generated when iir
4144 * transitions from zero to nonzero. If another bit got
4145 * set while we were handling the existing iir bits, then
4146 * we would never get another interrupt.
4147 *
4148 * This is fine on non-MSI as well, as if we hit this path
4149 * we avoid exiting the interrupt handler only to generate
4150 * another one.
4151 *
4152 * Note that for MSI this could cause a stray interrupt report
4153 * if an interrupt landed in the time between writing IIR and
4154 * the posting read. This should be rare enough to never
4155 * trigger the 99% of 100,000 interrupts test for disabling
4156 * stray interrupts.
4157 */
4158 iir = new_iir;
4159 }
4160
Imre Deak1f814da2015-12-16 02:52:19 +02004161 enable_rpm_wakeref_asserts(dev_priv);
4162
Chris Wilsona266c7d2012-04-24 22:59:44 +01004163 return ret;
4164}
4165
4166static void i965_irq_uninstall(struct drm_device *dev)
4167{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004168 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004169 int pipe;
4170
4171 if (!dev_priv)
4172 return;
4173
Egbert Eich0706f172015-09-23 16:15:27 +02004174 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Chris Wilsonadca4732012-05-11 18:01:31 +01004175 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01004176
4177 I915_WRITE(HWSTAM, 0xffffffff);
Damien Lespiau055e3932014-08-18 13:49:10 +01004178 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004179 I915_WRITE(PIPESTAT(pipe), 0);
4180 I915_WRITE(IMR, 0xffffffff);
4181 I915_WRITE(IER, 0x0);
4182
Damien Lespiau055e3932014-08-18 13:49:10 +01004183 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004184 I915_WRITE(PIPESTAT(pipe),
4185 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4186 I915_WRITE(IIR, I915_READ(IIR));
4187}
4188
Daniel Vetterfca52a52014-09-30 10:56:45 +02004189/**
4190 * intel_irq_init - initializes irq support
4191 * @dev_priv: i915 device instance
4192 *
4193 * This function initializes all the irq support including work items, timers
4194 * and all the vtables. It does not set up the interrupt itself, though.
4195 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004196void intel_irq_init(struct drm_i915_private *dev_priv)
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004197{
Chris Wilson91c8a322016-07-05 10:40:23 +01004198 struct drm_device *dev = &dev_priv->drm;
Joonas Lahtinencefcff82017-04-28 10:58:39 +03004199 int i;
Chris Wilson8b2e3262012-04-24 22:59:41 +01004200
Jani Nikula77913b32015-06-18 13:06:16 +03004201 intel_hpd_init_work(dev_priv);
4202
Daniel Vetterc6a828d2012-08-08 23:35:35 +02004203 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Joonas Lahtinencefcff82017-04-28 10:58:39 +03004204
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004205 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Joonas Lahtinencefcff82017-04-28 10:58:39 +03004206 for (i = 0; i < MAX_L3_SLICES; ++i)
4207 dev_priv->l3_parity.remap_info[i] = NULL;
Chris Wilson8b2e3262012-04-24 22:59:41 +01004208
Tvrtko Ursulin4805fe82016-11-04 14:42:46 +00004209 if (HAS_GUC_SCHED(dev_priv))
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05304210 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4211
Deepak Sa6706b42014-03-15 20:23:22 +05304212 /* Let's track the enabled rps events */
Wayne Boyer666a4532015-12-09 12:29:35 -08004213 if (IS_VALLEYVIEW(dev_priv))
Ville Syrjälä6c65a582014-08-29 14:14:07 +03004214 /* WaGsvRC0ResidencyMethod:vlv */
Chris Wilsone0e8c7c2017-03-09 21:12:30 +00004215 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
Deepak S31685c22014-07-03 17:33:01 -04004216 else
4217 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
Deepak Sa6706b42014-03-15 20:23:22 +05304218
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05304219 dev_priv->rps.pm_intrmsk_mbz = 0;
Sagar Arun Kamble1800ad22016-05-31 13:58:27 +05304220
4221 /*
Mika Kuoppalaacf2dc22017-04-13 14:15:27 +03004222	 * SNB, IVB and HSW can hang, while VLV and CHV may hard hang, on a
Sagar Arun Kamble1800ad22016-05-31 13:58:27 +05304223	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4224 *
4225 * TODO: verify if this can be reproduced on VLV,CHV.
4226 */
Mika Kuoppalaacf2dc22017-04-13 14:15:27 +03004227 if (INTEL_INFO(dev_priv)->gen <= 7)
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05304228 dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
Sagar Arun Kamble1800ad22016-05-31 13:58:27 +05304229
4230 if (INTEL_INFO(dev_priv)->gen >= 8)
Chris Wilson655d49e2017-03-12 13:27:45 +00004231 dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
Sagar Arun Kamble1800ad22016-05-31 13:58:27 +05304232
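	/*
	 * Frame counter selection: gen2 has none (max_vblank_count = 0 makes
	 * the drm core synthesize counts from vblank interrupts), g4x and
	 * ilk+ have a full 32 bit counter, the rest only 24 bits.
	 */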
Daniel Vetterb9632912014-09-30 10:56:44 +02004233 if (IS_GEN2(dev_priv)) {
Rodrigo Vivi4194c082016-08-03 10:00:56 -07004234 /* Gen2 doesn't have a hardware frame counter */
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +03004235 dev->max_vblank_count = 0;
Daniel Vetterb9632912014-09-30 10:56:44 +02004236 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004237 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
Ville Syrjäläfd8f5072015-09-18 20:03:42 +03004238 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
Ville Syrjälä391f75e2013-09-25 19:55:26 +03004239 } else {
4240 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4241 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004242 }
4243
Ville Syrjälä21da2702014-08-06 14:49:55 +03004244 /*
4245 * Opt out of the vblank disable timer on everything except gen2.
4246 * Gen2 doesn't have a hardware frame counter and so depends on
4247 * vblank interrupts to produce sane vblank sequence numbers.
4248 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004249 if (!IS_GEN2(dev_priv))
Ville Syrjälä21da2702014-08-06 14:49:55 +03004250 dev->vblank_disable_immediate = true;
4251
Chris Wilson262fd482017-02-15 13:15:47 +00004252 /* Most platforms treat the display irq block as an always-on
4253 * power domain. vlv/chv can disable it at runtime and need
4254 * special care to avoid writing any of the display block registers
4255 * outside of the power domain. We defer setting up the display irqs
4256 * in this case to the runtime pm.
4257 */
4258 dev_priv->display_irqs_enabled = true;
4259 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4260 dev_priv->display_irqs_enabled = false;
4261
Lyude317eaa92017-02-03 21:18:25 -05004262 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4263
Daniel Vetter1bf6ad62017-05-09 16:03:28 +02004264 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
Daniel Vetterf3a5c3f2015-02-13 21:03:44 +01004265 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004266
Daniel Vetterb9632912014-09-30 10:56:44 +02004267 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä43f328d2014-04-09 20:40:52 +03004268 dev->driver->irq_handler = cherryview_irq_handler;
4269 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4270 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4271 dev->driver->irq_uninstall = cherryview_irq_uninstall;
Chris Wilson86e83e32016-10-07 20:49:52 +01004272 dev->driver->enable_vblank = i965_enable_vblank;
4273 dev->driver->disable_vblank = i965_disable_vblank;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03004274 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetterb9632912014-09-30 10:56:44 +02004275 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07004276 dev->driver->irq_handler = valleyview_irq_handler;
4277 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4278 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4279 dev->driver->irq_uninstall = valleyview_irq_uninstall;
Chris Wilson86e83e32016-10-07 20:49:52 +01004280 dev->driver->enable_vblank = i965_enable_vblank;
4281 dev->driver->disable_vblank = i965_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05004282 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetterb9632912014-09-30 10:56:44 +02004283 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07004284 dev->driver->irq_handler = gen8_irq_handler;
Daniel Vetter723761b2014-05-22 17:56:34 +02004285 dev->driver->irq_preinstall = gen8_irq_reset;
Ben Widawskyabd58f02013-11-02 21:07:09 -07004286 dev->driver->irq_postinstall = gen8_irq_postinstall;
4287 dev->driver->irq_uninstall = gen8_irq_uninstall;
4288 dev->driver->enable_vblank = gen8_enable_vblank;
4289 dev->driver->disable_vblank = gen8_disable_vblank;
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004290 if (IS_GEN9_LP(dev_priv))
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02004291 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
Rodrigo Vivi7b22b8c2017-06-02 13:06:39 -07004292 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
4293 HAS_PCH_CNP(dev_priv))
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03004294 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4295 else
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03004296 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004297 } else if (HAS_PCH_SPLIT(dev_priv)) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004298 dev->driver->irq_handler = ironlake_irq_handler;
Daniel Vetter723761b2014-05-22 17:56:34 +02004299 dev->driver->irq_preinstall = ironlake_irq_reset;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004300 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4301 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4302 dev->driver->enable_vblank = ironlake_enable_vblank;
4303 dev->driver->disable_vblank = ironlake_disable_vblank;
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03004304 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004305 } else {
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01004306 if (IS_GEN2(dev_priv)) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01004307 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4308 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4309 dev->driver->irq_handler = i8xx_irq_handler;
4310 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilson86e83e32016-10-07 20:49:52 +01004311 dev->driver->enable_vblank = i8xx_enable_vblank;
4312 dev->driver->disable_vblank = i8xx_disable_vblank;
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01004313 } else if (IS_GEN3(dev_priv)) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004314 dev->driver->irq_preinstall = i915_irq_preinstall;
4315 dev->driver->irq_postinstall = i915_irq_postinstall;
4316 dev->driver->irq_uninstall = i915_irq_uninstall;
4317 dev->driver->irq_handler = i915_irq_handler;
Chris Wilson86e83e32016-10-07 20:49:52 +01004318 dev->driver->enable_vblank = i8xx_enable_vblank;
4319 dev->driver->disable_vblank = i8xx_disable_vblank;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004320 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004321 dev->driver->irq_preinstall = i965_irq_preinstall;
4322 dev->driver->irq_postinstall = i965_irq_postinstall;
4323 dev->driver->irq_uninstall = i965_irq_uninstall;
4324 dev->driver->irq_handler = i965_irq_handler;
Chris Wilson86e83e32016-10-07 20:49:52 +01004325 dev->driver->enable_vblank = i965_enable_vblank;
4326 dev->driver->disable_vblank = i965_disable_vblank;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004327 }
Ville Syrjälä778eb332015-01-09 14:21:13 +02004328 if (I915_HAS_HOTPLUG(dev_priv))
4329 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004330 }
4331}
Daniel Vetter20afbda2012-12-11 14:05:07 +01004332
Daniel Vetterfca52a52014-09-30 10:56:45 +02004333/**
Joonas Lahtinencefcff82017-04-28 10:58:39 +03004334 * intel_irq_fini - deinitializes IRQ support
4335 * @i915: i915 device instance
4336 *
4337 * This function deinitializes all the IRQ support.
4338 */
4339void intel_irq_fini(struct drm_i915_private *i915)
4340{
4341 int i;
4342
4343 for (i = 0; i < MAX_L3_SLICES; ++i)
4344 kfree(i915->l3_parity.remap_info[i]);
4345}
4346
4347/**
Daniel Vetterfca52a52014-09-30 10:56:45 +02004348 * intel_irq_install - enables the hardware interrupt
4349 * @dev_priv: i915 device instance
4350 *
4351 * This function enables the hardware interrupt handling, but leaves the hotplug
4352 * handling still disabled. It is called after intel_irq_init().
4353 *
4354 * In the driver load and resume code we need working interrupts in a few places
4355 * but don't want to deal with the hassle of concurrent probe and hotplug
4356 * workers. Hence the split into this two-stage approach.
4357 */
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004358int intel_irq_install(struct drm_i915_private *dev_priv)
4359{
4360 /*
4361 * We enable some interrupt sources in our postinstall hooks, so mark
4362 * interrupts as enabled _before_ actually enabling them to avoid
4363 * special cases in our ordering checks.
4364 */
4365 dev_priv->pm.irqs_enabled = true;
4366
Chris Wilson91c8a322016-07-05 10:40:23 +01004367 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004368}
4369
Daniel Vetterfca52a52014-09-30 10:56:45 +02004370/**
4371 * intel_irq_uninstall - finalizes all irq handling
4372 * @dev_priv: i915 device instance
4373 *
4374 * This stops interrupt and hotplug handling and unregisters and frees all
4375 * resources acquired in the init functions.
4376 */
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004377void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4378{
Chris Wilson91c8a322016-07-05 10:40:23 +01004379 drm_irq_uninstall(&dev_priv->drm);
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004380 intel_hpd_cancel_work(dev_priv);
4381 dev_priv->pm.irqs_enabled = false;
4382}
4383
Daniel Vetterfca52a52014-09-30 10:56:45 +02004384/**
4385 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4386 * @dev_priv: i915 device instance
4387 *
4388 * This function is used to disable interrupts at runtime, both in the runtime
4389 * pm and the system suspend/resume code.
4390 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004391void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004392{
Chris Wilson91c8a322016-07-05 10:40:23 +01004393 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004394 dev_priv->pm.irqs_enabled = false;
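	/* Wait for any in-flight handler to finish before returning. */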
Chris Wilson91c8a322016-07-05 10:40:23 +01004395 synchronize_irq(dev_priv->drm.irq);
Paulo Zanonic67a4702013-08-19 13:18:09 -03004396}
4397
Daniel Vetterfca52a52014-09-30 10:56:45 +02004398/**
4399 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4400 * @dev_priv: i915 device instance
4401 *
4402 * This function is used to enable interrupts at runtime, both in the runtime
4403 * pm and the system suspend/resume code.
4404 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004405void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004406{
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004407 dev_priv->pm.irqs_enabled = true;
Chris Wilson91c8a322016-07-05 10:40:23 +01004408 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4409 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
Paulo Zanonic67a4702013-08-19 13:18:09 -03004410}