/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

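/*
 * Per-platform tables mapping each hotplug (HPD) pin to that pin's
 * hotplug interrupt bit. Pins a platform lacks are simply left out of
 * the initializer and default to 0 (no hotplug support on that pin).
 */
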
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

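/*
 * Illustrative expansion (not extra driver code): GEN5_IRQ_RESET(DE)
 * masks everything in DEIMR, zeroes DEIER, then clears DEIIR twice with
 * posting reads in between, since the IIR can hold two queued events.
 */
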
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

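/*
 * Note the ordering in the init macros above: IIR is asserted clean
 * first, IER is written before IMR is unmasked, and the final IMR write
 * is flushed with a posting read; presumably so that no event can be
 * signalled while the three registers are only partly programmed.
 */
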
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is held already, this function acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

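/*
 * Hypothetical usage sketch (names from the i915 tables above): to turn
 * on hotplug detection for port B one would pass the same bit as both
 * arguments, i915_hotplug_interrupt_update(dev_priv,
 * PORTB_HOTPLUG_INT_EN, PORTB_HOTPLUG_INT_EN), while passing bits == 0
 * with the same mask turns it back off.
 */
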
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

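/*
 * Worked example of the IMR arithmetic above, with made-up values:
 * interrupt_mask = 0b0110, enabled_irq_mask = 0b0010. Bits outside
 * 0b0110 keep their previous DEIMR value; bit 0b0010 ends up cleared
 * (unmasked, interrupt delivered) and bit 0b0100 ends up set (masked),
 * since DEIMR uses "1 = masked".
 */
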
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

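/*
 * The gen5_{enable,disable}_gt_irq() wrappers above are just the two
 * interesting cases of ilk_update_gt_irq(): enable updates exactly the
 * bits in mask and unmasks them all, while disable updates the same
 * bits and leaves them all masked.
 */
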
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
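	/*
	 * IIR can queue up two events (see the reset macros near the top
	 * of this file), so write it twice to make sure both slots are
	 * cleared.
	 */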
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB do (and VLV and CHV may) hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev_priv->dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

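/*
 * In PIPESTAT an event's enable bit sits 16 bits above its status bit,
 * which is why the common case below is a plain status_mask << 16;
 * VLV/CHV additionally need the sprite flip-done and PSR special cases
 * handled by vlv_get_pipestat_enable_mask() above.
 */
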
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: driver private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/ .      \hs/          \hs/          \hs/ .         \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	if (!intel_engine_initialized(engine))
		return;

	trace_i915_gem_request_notify(engine);
	engine->user_interrupts++;

	wake_up_all(&engine->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

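/*
 * Reading the helper above: both sides of the final comparison are
 * scaled so that "c0 >= time" effectively asks whether the combined
 * render+media C0 residency exceeded 'threshold' percent of the elapsed
 * CZ-clock window; threshold comes from rps.up_threshold or
 * rps.down_threshold at the call sites below.
 */
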
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (engine->irq_refcount)
			return true;

	return false;
}

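/*
 * The frequency adjustment below is exponential in streaks: each
 * consecutive up (or down) threshold event doubles last_adj, so the
 * step size goes 1, 2, 4, ... (CHV is kept on even steps), and the
 * result is clamped to the min/max softlimits before being applied.
 */
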
Ben Widawsky4912d042011-04-25 11:25:20 -07001084static void gen6_pm_rps_work(struct work_struct *work)
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001085{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001086 struct drm_i915_private *dev_priv =
1087 container_of(work, struct drm_i915_private, rps.work);
Chris Wilson8d3afd72015-05-21 21:01:47 +01001088 bool client_boost;
1089 int new_delay, adj, min, max;
Paulo Zanoniedbfdb42013-08-06 18:57:13 -03001090 u32 pm_iir;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001091
Daniel Vetter59cdb632013-07-04 23:35:28 +02001092 spin_lock_irq(&dev_priv->irq_lock);
Imre Deakd4d70aa2014-11-19 15:30:04 +02001093 /* Speed up work cancelation during disabling rps interrupts. */
1094 if (!dev_priv->rps.interrupts_enabled) {
1095 spin_unlock_irq(&dev_priv->irq_lock);
1096 return;
1097 }
Imre Deak1f814da2015-12-16 02:52:19 +02001098
1099 /*
1100 * The RPS work is synced during runtime suspend, we don't require a
1101 * wakeref. TODO: instead of disabling the asserts make sure that we
1102 * always hold an RPM reference while the work is running.
1103 */
1104 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1105
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001106 pm_iir = dev_priv->rps.pm_iir;
1107 dev_priv->rps.pm_iir = 0;
Imre Deaka72fbc32014-11-05 20:48:31 +02001108 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1109 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
Chris Wilson8d3afd72015-05-21 21:01:47 +01001110 client_boost = dev_priv->rps.client_boost;
1111 dev_priv->rps.client_boost = false;
Daniel Vetter59cdb632013-07-04 23:35:28 +02001112 spin_unlock_irq(&dev_priv->irq_lock);
Ben Widawsky4912d042011-04-25 11:25:20 -07001113
Paulo Zanoni60611c12013-08-15 11:50:01 -03001114 /* Make sure we didn't queue anything we're not going to process. */
Deepak Sa6706b42014-03-15 20:23:22 +05301115 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
Paulo Zanoni60611c12013-08-15 11:50:01 -03001116
Chris Wilson8d3afd72015-05-21 21:01:47 +01001117 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
Imre Deak1f814da2015-12-16 02:52:19 +02001118 goto out;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001119
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001120 mutex_lock(&dev_priv->rps.hw_lock);
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01001121
Chris Wilson43cf3bf2015-03-18 09:48:22 +00001122 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1123
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001124 adj = dev_priv->rps.last_adj;
Chris Wilsonedcf2842015-04-07 16:20:29 +01001125 new_delay = dev_priv->rps.cur_freq;
Chris Wilson8d3afd72015-05-21 21:01:47 +01001126 min = dev_priv->rps.min_freq_softlimit;
1127 max = dev_priv->rps.max_freq_softlimit;
1128
1129 if (client_boost) {
1130 new_delay = dev_priv->rps.max_freq_softlimit;
1131 adj = 0;
1132 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001133 if (adj > 0)
1134 adj *= 2;
Chris Wilsonedcf2842015-04-07 16:20:29 +01001135 else /* CHV needs even encode values */
1136 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
Ville Syrjälä74250342013-06-25 21:38:11 +03001137 /*
1138 * For better performance, jump directly
1139 * to RPe if we're below it.
1140 */
Chris Wilsonedcf2842015-04-07 16:20:29 +01001141 if (new_delay < dev_priv->rps.efficient_freq - adj) {
Ben Widawskyb39fb292014-03-19 18:31:11 -07001142 new_delay = dev_priv->rps.efficient_freq;
Chris Wilsonedcf2842015-04-07 16:20:29 +01001143 adj = 0;
1144 }
Chris Wilsonf5a4c672015-04-27 13:41:23 +01001145 } else if (any_waiters(dev_priv)) {
1146 adj = 0;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001147 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
Ben Widawskyb39fb292014-03-19 18:31:11 -07001148 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1149 new_delay = dev_priv->rps.efficient_freq;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001150 else
Ben Widawskyb39fb292014-03-19 18:31:11 -07001151 new_delay = dev_priv->rps.min_freq_softlimit;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001152 adj = 0;
1153 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1154 if (adj < 0)
1155 adj *= 2;
Chris Wilsonedcf2842015-04-07 16:20:29 +01001156 else /* CHV needs even encode values */
1157 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001158 } else { /* unknown event */
Chris Wilsonedcf2842015-04-07 16:20:29 +01001159 adj = 0;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001160 }
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001161
Chris Wilsonedcf2842015-04-07 16:20:29 +01001162 dev_priv->rps.last_adj = adj;
1163
	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt, so clamp to the soft limits read above.
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

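/*
 * Per-engine dispatch for gen8+: a user interrupt wakes the engine's
 * waiters via notify_ring(), while a context switch interrupt schedules
 * the engine's execlists tasklet.
 */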
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

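/*
 * gen8_gt_irq_ack() runs while the master interrupt control is disabled:
 * it latches each pending GT IIR bank into gt_iir[] and clears it with the
 * raw _FW accessors to keep the hot path cheap. The actual engine and PM
 * handling is deferred to gen8_gt_irq_handler(), which the callers invoke
 * after re-enabling the master control.
 */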
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
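 * (spt_irq_handler() below, for example, calls this once for PCH_PORT_HOTPLUG
 * and once for PCH_PORT_HOTPLUG2, ORing into the same pin_mask/long_mask.)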
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
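/*
 * CRC interrupts feed a per-pipe circular buffer that userspace drains
 * through debugfs; on overflow the sample is dropped and an error logged
 * rather than stalling the interrupt handler.
 */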
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(dev_priv->dev, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

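/*
 * Latch and clear the PIPESTAT registers under irq_lock (bailing out if
 * display IRQs are disabled); the decoded per-pipe status words are
 * processed later by valleyview_pipestat_irq_handler().
 */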
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

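/*
 * Writing the latched value back to PORT_HOTPLUG_STAT acks the hotplug
 * interrupt; decoding into HPD pins is deferred to i9xx_hpd_irq_handler().
 */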
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

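		/*
		 * The master interrupt is re-enabled at this point; run the
		 * handlers for everything latched above, now that a fresh
		 * 0->1 edge can be generated for any new events.
		 */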
		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2228
Oscar Mateo72c90f62014-06-16 16:10:57 +01002229/*
2230 * To handle irqs with the minimum potential races with fresh interrupts, we:
2231 * 1 - Disable Master Interrupt Control.
2232 * 2 - Find the source(s) of the interrupt.
2233 * 3 - Clear the Interrupt Identity bits (IIR).
2234 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2235 * 5 - Re-enable Master Interrupt Control.
2236 */
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002237static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002238{
Daniel Vetter45a83f82014-05-12 19:17:55 +02002239 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03002240 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002241 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01002242 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002243
Imre Deak2dd2a882015-02-24 11:14:30 +02002244 if (!intel_irqs_enabled(dev_priv))
2245 return IRQ_NONE;
2246
Imre Deak1f814da2015-12-16 02:52:19 +02002247 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2248 disable_rpm_wakeref_asserts(dev_priv);
2249
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002250 /* disable master interrupt before clearing iir */
2251 de_ier = I915_READ(DEIER);
2252 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03002253 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01002254
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002255 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2256 * interrupts will will be stored on its back queue, and then we'll be
2257 * able to process them after we restore SDEIER (as soon as we restore
2258 * it, we'll get an interrupt if SDEIIR still has something to process
2259 * due to its back queue). */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002260 if (!HAS_PCH_NOP(dev_priv)) {
Ben Widawskyab5c6082013-04-05 13:12:41 -07002261 sde_ier = I915_READ(SDEIER);
2262 I915_WRITE(SDEIER, 0);
2263 POSTING_READ(SDEIER);
2264 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002265
Oscar Mateo72c90f62014-06-16 16:10:57 +01002266 /* Find, clear, then process each source of interrupt */
2267
Chris Wilson0e434062012-05-09 21:45:44 +01002268 gt_iir = I915_READ(GTIIR);
2269 if (gt_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002270 I915_WRITE(GTIIR, gt_iir);
2271 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002272 if (INTEL_GEN(dev_priv) >= 6)
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002273 snb_gt_irq_handler(dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03002274 else
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002275 ilk_gt_irq_handler(dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002276 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002277
2278 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01002279 if (de_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002280 I915_WRITE(DEIIR, de_iir);
2281 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002282 if (INTEL_GEN(dev_priv) >= 7)
2283 ivb_display_irq_handler(dev_priv, de_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002284 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002285 ilk_display_irq_handler(dev_priv, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002286 }
2287
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002288 if (INTEL_GEN(dev_priv) >= 6) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002289 u32 pm_iir = I915_READ(GEN6_PMIIR);
2290 if (pm_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002291 I915_WRITE(GEN6_PMIIR, pm_iir);
2292 ret = IRQ_HANDLED;
Oscar Mateo72c90f62014-06-16 16:10:57 +01002293 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002294 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002295 }
2296
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002297 I915_WRITE(DEIER, de_ier);
2298 POSTING_READ(DEIER);
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002299 if (!HAS_PCH_NOP(dev_priv)) {
Ben Widawskyab5c6082013-04-05 13:12:41 -07002300 I915_WRITE(SDEIER, sde_ier);
2301 POSTING_READ(SDEIER);
2302 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002303
Imre Deak1f814da2015-12-16 02:52:19 +02002304	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2305 enable_rpm_wakeref_asserts(dev_priv);
2306
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002307 return ret;
2308}
2309
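/*
 * Illustrative sketch, not part of the driver: ironlake_irq_handler() above
 * reduces to the five-step bracket below once the per-domain processing is
 * elided. sketch_ilk_irq() is a hypothetical helper; the registers and
 * accessors are the real ones used above.
 */
static irqreturn_t __maybe_unused
sketch_ilk_irq(struct drm_i915_private *dev_priv)
{
	irqreturn_t ret = IRQ_NONE;
	u32 de_ier, de_iir;

	/* 1 - disable master interrupt control, saving the old mask */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* 2 - find the source(s) of the interrupt */
	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		/* 3 - clear the identity bits (write-1-to-clear) */
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		/* 4 - process the events recorded in de_iir here */
	}

	/* 5 - re-enable master interrupt control */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
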
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002310static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2311 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002312 const u32 hpd[HPD_NUM_PINS])
Shashank Sharmad04a4922014-08-22 17:40:41 +05302313{
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002314 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302315
Ville Syrjäläa52bb152015-08-27 23:56:11 +03002316 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2317 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302318
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002319 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002320 dig_hotplug_reg, hpd,
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002321 bxt_port_hotplug_long_detect);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002322
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002323 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302324}
2325
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002326static irqreturn_t
2327gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002328{
Ben Widawskyabd58f02013-11-02 21:07:09 -07002329 irqreturn_t ret = IRQ_NONE;
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002330 u32 iir;
Daniel Vetterc42664c2013-11-07 11:05:40 +01002331 enum pipe pipe;
Jesse Barnes88e04702014-11-13 17:51:48 +00002332
Ben Widawskyabd58f02013-11-02 21:07:09 -07002333 if (master_ctl & GEN8_DE_MISC_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002334 iir = I915_READ(GEN8_DE_MISC_IIR);
2335 if (iir) {
2336 I915_WRITE(GEN8_DE_MISC_IIR, iir);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002337 ret = IRQ_HANDLED;
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002338 if (iir & GEN8_DE_MISC_GSE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002339 intel_opregion_asle_intr(dev_priv);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002340 else
2341 DRM_ERROR("Unexpected DE Misc interrupt\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002342		} else
2343			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002345 }
2346
Daniel Vetter6d766f02013-11-07 14:49:55 +01002347 if (master_ctl & GEN8_DE_PORT_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002348 iir = I915_READ(GEN8_DE_PORT_IIR);
2349 if (iir) {
2350 u32 tmp_mask;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302351 bool found = false;
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002352
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002353 I915_WRITE(GEN8_DE_PORT_IIR, iir);
Daniel Vetter6d766f02013-11-07 14:49:55 +01002354 ret = IRQ_HANDLED;
Jesse Barnes88e04702014-11-13 17:51:48 +00002355
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002356 tmp_mask = GEN8_AUX_CHANNEL_A;
2357 if (INTEL_INFO(dev_priv)->gen >= 9)
2358 tmp_mask |= GEN9_AUX_CHANNEL_B |
2359 GEN9_AUX_CHANNEL_C |
2360 GEN9_AUX_CHANNEL_D;
2361
2362 if (iir & tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002363 dp_aux_irq_handler(dev_priv);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302364 found = true;
2365 }
2366
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002367 if (IS_BROXTON(dev_priv)) {
2368 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2369 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002370 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2371 hpd_bxt);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002372 found = true;
2373 }
2374 } else if (IS_BROADWELL(dev_priv)) {
2375 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2376 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002377 ilk_hpd_irq_handler(dev_priv,
2378 tmp_mask, hpd_bdw);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002379 found = true;
2380 }
Shashank Sharmad04a4922014-08-22 17:40:41 +05302381 }
2382
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002383 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2384 gmbus_irq_handler(dev_priv);
Shashank Sharma9e637432014-08-22 17:40:43 +05302385 found = true;
2386 }
2387
Shashank Sharmad04a4922014-08-22 17:40:41 +05302388 if (!found)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002389 DRM_ERROR("Unexpected DE Port interrupt\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002390		} else
2391			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002393 }
2394
Damien Lespiau055e3932014-08-18 13:49:10 +01002395 for_each_pipe(dev_priv, pipe) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002396 u32 flip_done, fault_errors;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002397
Daniel Vetterc42664c2013-11-07 11:05:40 +01002398 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2399 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002400
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002401 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2402 if (!iir) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07002403 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002404 continue;
2405 }
2406
2407 ret = IRQ_HANDLED;
2408 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2409
Daniel Vetter5a21b662016-05-24 17:13:53 +02002410 if (iir & GEN8_PIPE_VBLANK &&
2411 intel_pipe_handle_vblank(dev_priv, pipe))
2412 intel_check_page_flip(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002413
2414 flip_done = iir;
2415 if (INTEL_INFO(dev_priv)->gen >= 9)
2416 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2417 else
2418 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2419
Maarten Lankhorst5251f042016-05-17 15:07:47 +02002420 if (flip_done)
Maarten Lankhorst51cbaf02016-05-17 15:07:49 +02002421 intel_finish_page_flip_cs(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002422
2423 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002424 hsw_pipe_crc_irq_handler(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002425
2426 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2427 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2428
2429 fault_errors = iir;
2430 if (INTEL_INFO(dev_priv)->gen >= 9)
2431 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2432 else
2433 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2434
2435 if (fault_errors)
2436 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2437 pipe_name(pipe),
2438 fault_errors);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002439 }
2440
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002441 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
Shashank Sharma266ea3d2014-08-22 17:40:42 +05302442 master_ctl & GEN8_DE_PCH_IRQ) {
Daniel Vetter92d03a82013-11-07 11:05:43 +01002443 /*
2444 * FIXME(BDW): Assume for now that the new interrupt handling
2445 * scheme also closed the SDE interrupt handling race we've seen
2446 * on older pch-split platforms. But this needs testing.
2447 */
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002448 iir = I915_READ(SDEIIR);
2449 if (iir) {
2450 I915_WRITE(SDEIIR, iir);
Daniel Vetter92d03a82013-11-07 11:05:43 +01002451 ret = IRQ_HANDLED;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002452
2453 if (HAS_PCH_SPT(dev_priv))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002454 spt_irq_handler(dev_priv, iir);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002455 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002456 cpt_irq_handler(dev_priv, iir);
Jani Nikula2dfb0b82016-01-07 10:29:10 +02002457 } else {
2458 /*
2459 * Like on previous PCH there seems to be something
2460 * fishy going on with forwarding PCH interrupts.
2461 */
2462 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2463 }
Daniel Vetter92d03a82013-11-07 11:05:43 +01002464 }
2465
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002466 return ret;
2467}
2468
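/*
 * Illustrative sketch, not part of the driver: the per-pipe half of
 * gen8_de_irq_handler() above follows the "find, clear, then process"
 * discipline below. sketch_gen8_pipe_irqs() is a hypothetical helper; the
 * macros are the real ones used above.
 */
static irqreturn_t __maybe_unused
sketch_gen8_pipe_irqs(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 iir;

		/* The master control register says which per-pipe IIRs
		 * are worth reading at all. */
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir)
			continue;

		/* Ack before processing so that an event arriving while
		 * we work re-asserts the interrupt instead of being lost. */
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
		ret = IRQ_HANDLED;

		/* ... dispatch on the bits in iir, as above ... */
	}

	return ret;
}
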
2469static irqreturn_t gen8_irq_handler(int irq, void *arg)
2470{
2471 struct drm_device *dev = arg;
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473 u32 master_ctl;
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002474 u32 gt_iir[4] = {};
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002475 irqreturn_t ret;
2476
2477 if (!intel_irqs_enabled(dev_priv))
2478 return IRQ_NONE;
2479
2480 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2481 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2482 if (!master_ctl)
2483 return IRQ_NONE;
2484
2485 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2486
2487	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2488 disable_rpm_wakeref_asserts(dev_priv);
2489
2490 /* Find, clear, then process each source of interrupt */
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002491 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2492 gen8_gt_irq_handler(dev_priv, gt_iir);
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002493 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2494
Chris Wilsoncb0d2052015-04-07 16:21:04 +01002495 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2496 POSTING_READ_FW(GEN8_MASTER_IRQ);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002497
Imre Deak1f814da2015-12-16 02:52:19 +02002498 enable_rpm_wakeref_asserts(dev_priv);
2499
Ben Widawskyabd58f02013-11-02 21:07:09 -07002500 return ret;
2501}
2502
Daniel Vetter17e1df02013-09-08 21:57:13 +02002503static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2504 bool reset_completed)
2505{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002506 struct intel_engine_cs *engine;
Daniel Vetter17e1df02013-09-08 21:57:13 +02002507
2508 /*
2509 * Notify all waiters for GPU completion events that reset state has
2510 * been changed, and that they need to restart their wait after
2511 * checking for potential errors (and bail out to drop locks if there is
2512	 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2513 */
2514
2515 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002516 for_each_engine(engine, dev_priv)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002517 wake_up_all(&engine->irq_queue);
Daniel Vetter17e1df02013-09-08 21:57:13 +02002518
2519 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2520 wake_up_all(&dev_priv->pending_flip_queue);
2521
2522 /*
2523 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2524 * reset state is cleared.
2525 */
2526 if (reset_completed)
2527 wake_up_all(&dev_priv->gpu_error.reset_queue);
2528}
2529
Jesse Barnes8a905232009-07-11 16:48:03 -04002530/**
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002531 * i915_reset_and_wakeup - do process context error handling work
Javier Martinez Canillas468f9d22015-10-08 09:54:44 +02002532 * @dev_priv: i915 device private
Jesse Barnes8a905232009-07-11 16:48:03 -04002533 *
2534 * Fire an error uevent so userspace can see that a hang or error
2535 * was detected.
2536 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002537static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
Jesse Barnes8a905232009-07-11 16:48:03 -04002538{
Chris Wilsonc0336662016-05-06 15:40:21 +01002539 struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
Ben Widawskycce723e2013-07-19 09:16:42 -07002540 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2541 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2542 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetter17e1df02013-09-08 21:57:13 +02002543 int ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04002544
Chris Wilsonc0336662016-05-06 15:40:21 +01002545 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002546
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002547 /*
2548 * Note that there's only one work item which does gpu resets, so we
2549 * need not worry about concurrent gpu resets potentially incrementing
2550 * error->reset_counter twice. We only need to take care of another
2551 * racing irq/hangcheck declaring the gpu dead for a second time. A
2552 * quick check for that is good enough: schedule_work ensures the
2553 * correct ordering between hang detection and this work item, and since
2554 * the reset in-progress bit is only ever set by code outside of this
2555 * work we don't need to worry about any other races.
2556 */
Chris Wilsond98c52c2016-04-13 17:35:05 +01002557 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01002558 DRM_DEBUG_DRIVER("resetting chip\n");
Chris Wilsonc0336662016-05-06 15:40:21 +01002559 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002560
Daniel Vetter17e1df02013-09-08 21:57:13 +02002561 /*
Imre Deakf454c692014-04-23 01:09:04 +03002562 * In most cases it's guaranteed that we get here with an RPM
2563 * reference held, for example because there is a pending GPU
2564 * request that won't finish until the reset is done. This
2565 * isn't the case at least when we get here by doing a
2566		 * simulated reset via debugfs, so get an RPM reference.
2567 */
2568 intel_runtime_pm_get(dev_priv);
Ville Syrjälä75147472014-11-24 18:28:11 +02002569
Chris Wilsonc0336662016-05-06 15:40:21 +01002570 intel_prepare_reset(dev_priv);
Ville Syrjälä75147472014-11-24 18:28:11 +02002571
Imre Deakf454c692014-04-23 01:09:04 +03002572 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002573 * All state reset _must_ be completed before we update the
2574 * reset counter, for otherwise waiters might miss the reset
2575 * pending state and not properly drop locks, resulting in
2576 * deadlocks with the reset work.
2577 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002578 ret = i915_reset(dev_priv);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002579
Chris Wilsonc0336662016-05-06 15:40:21 +01002580 intel_finish_reset(dev_priv);
Daniel Vetter17e1df02013-09-08 21:57:13 +02002581
Imre Deakf454c692014-04-23 01:09:04 +03002582 intel_runtime_pm_put(dev_priv);
2583
Chris Wilsond98c52c2016-04-13 17:35:05 +01002584 if (ret == 0)
Chris Wilsonc0336662016-05-06 15:40:21 +01002585 kobject_uevent_env(kobj,
Daniel Vetterf69061b2012-12-06 09:01:42 +01002586 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002587
Daniel Vetter17e1df02013-09-08 21:57:13 +02002588 /*
2589 * Note: The wake_up also serves as a memory barrier so that
2590		 * waiters see the updated value of the reset counter atomic_t.
2591 */
2592 i915_error_wake_up(dev_priv, true);
Ben Gamarif316a422009-09-14 17:48:46 -04002593 }
Jesse Barnes8a905232009-07-11 16:48:03 -04002594}
2595
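/*
 * Illustrative sketch, not part of the driver: the ordering contract the
 * comments above describe, reduced to its two halves. "done" stands in for
 * a hypothetical per-waiter completion condition; the rest is taken from
 * the surrounding code.
 */
#if 0
	/* waiter side: the condition is re-evaluated on every wake-up, so a
	 * reset makes itself visible simply by waking the queue */
	wait_event(engine->irq_queue,
		   done || i915_reset_in_progress(&dev_priv->gpu_error));

	/* resetter side: publish the new reset state first, then wake
	 * everyone - the wake-up doubles as the memory barrier that makes
	 * the updated counter visible to the waiters */
	atomic_or(I915_RESET_IN_PROGRESS_FLAG,
		  &dev_priv->gpu_error.reset_counter);
	i915_error_wake_up(dev_priv, false);
#endif
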
Chris Wilsonc0336662016-05-06 15:40:21 +01002596static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
Jesse Barnes8a905232009-07-11 16:48:03 -04002597{
Ben Widawskybd9854f2012-08-23 15:18:09 -07002598 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04002599 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07002600 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04002601
Chris Wilson35aed2e2010-05-27 13:18:12 +01002602 if (!eir)
2603 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04002604
Joe Perchesa70491c2012-03-18 13:00:11 -07002605 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002606
Chris Wilsonc0336662016-05-06 15:40:21 +01002607 i915_get_extra_instdone(dev_priv, instdone);
Ben Widawskybd9854f2012-08-23 15:18:09 -07002608
Chris Wilsonc0336662016-05-06 15:40:21 +01002609 if (IS_G4X(dev_priv)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002610 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2611 u32 ipeir = I915_READ(IPEIR_I965);
2612
Joe Perchesa70491c2012-03-18 13:00:11 -07002613 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2614 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002615 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2616 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002617 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002618 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002619 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002620 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002621 }
2622 if (eir & GM45_ERROR_PAGE_TABLE) {
2623 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002624 pr_err("page table error\n");
2625 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002626 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002627 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002628 }
2629 }
2630
Chris Wilsonc0336662016-05-06 15:40:21 +01002631 if (!IS_GEN2(dev_priv)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002632 if (eir & I915_ERROR_PAGE_TABLE) {
2633 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002634 pr_err("page table error\n");
2635 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002636 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002637 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002638 }
2639 }
2640
2641 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002642 pr_err("memory refresh error:\n");
Damien Lespiau055e3932014-08-18 13:49:10 +01002643 for_each_pipe(dev_priv, pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002644 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002645 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002646 /* pipestat has already been acked */
2647 }
2648 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002649 pr_err("instruction error\n");
2650 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002651 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2652 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsonc0336662016-05-06 15:40:21 +01002653 if (INTEL_GEN(dev_priv) < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002654 u32 ipeir = I915_READ(IPEIR);
2655
Joe Perchesa70491c2012-03-18 13:00:11 -07002656 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2657 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002658 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002659 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002660 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002661 } else {
2662 u32 ipeir = I915_READ(IPEIR_I965);
2663
Joe Perchesa70491c2012-03-18 13:00:11 -07002664 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2665 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002666 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002667 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002668 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002669 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002670 }
2671 }
2672
2673 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002674 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002675 eir = I915_READ(EIR);
2676 if (eir) {
2677 /*
2678 * some errors might have become stuck,
2679 * mask them.
2680 */
2681 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2682 I915_WRITE(EMR, I915_READ(EMR) | eir);
2683 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2684 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002685}
2686
2687/**
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002688 * i915_handle_error - handle a gpu error
Chris Wilson35aed2e2010-05-27 13:18:12 +01002689 * @dev: drm device
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00002690 * @engine_mask: mask representing engines that are hung
Javier Martinez Canillasaafd8582015-10-08 09:57:49 +02002691 * Do some basic checking of register state at error time and
Chris Wilson35aed2e2010-05-27 13:18:12 +01002692 * dump it to the syslog. Also call i915_capture_error_state() to make
2693 * sure we get a record and make it available in debugfs. Fire a uevent
2694 * so userspace knows something bad happened (should trigger collection
2695 * of a ring dump etc.).
2696 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002697void i915_handle_error(struct drm_i915_private *dev_priv,
2698 u32 engine_mask,
Mika Kuoppala58174462014-02-25 17:11:26 +02002699 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002700{
Mika Kuoppala58174462014-02-25 17:11:26 +02002701 va_list args;
2702 char error_msg[80];
Chris Wilson35aed2e2010-05-27 13:18:12 +01002703
Mika Kuoppala58174462014-02-25 17:11:26 +02002704 va_start(args, fmt);
2705 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2706 va_end(args);
2707
Chris Wilsonc0336662016-05-06 15:40:21 +01002708 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2709 i915_report_and_clear_eir(dev_priv);
Jesse Barnes8a905232009-07-11 16:48:03 -04002710
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00002711 if (engine_mask) {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002712 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
Daniel Vetterf69061b2012-12-06 09:01:42 +01002713 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002714
Ben Gamari11ed50e2009-09-14 17:48:45 -04002715 /*
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002716 * Wakeup waiting processes so that the reset function
2717 * i915_reset_and_wakeup doesn't deadlock trying to grab
2718 * various locks. By bumping the reset counter first, the woken
Daniel Vetter17e1df02013-09-08 21:57:13 +02002719 * processes will see a reset in progress and back off,
2720 * releasing their locks and then wait for the reset completion.
2721 * We must do this for _all_ gpu waiters that might hold locks
2722 * that the reset work needs to acquire.
2723 *
2724 * Note: The wake_up serves as the required memory barrier to
2725 * ensure that the waiters see the updated value of the reset
2726 * counter atomic_t.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002727 */
Daniel Vetter17e1df02013-09-08 21:57:13 +02002728 i915_error_wake_up(dev_priv, false);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002729 }
2730
Chris Wilsonc0336662016-05-06 15:40:21 +01002731 i915_reset_and_wakeup(dev_priv);
Jesse Barnes8a905232009-07-11 16:48:03 -04002732}
2733
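/*
 * Illustrative sketch, not part of the driver: the varargs capture used by
 * i915_handle_error() above. vscnprintf() bounds the write to the buffer
 * and returns the length actually stored, so an oversized caller-supplied
 * message is truncated rather than overrun. The helper name is hypothetical.
 */
static void __maybe_unused
sketch_capture_error_msg(char *buf, size_t len, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vscnprintf(buf, len, fmt, args);
	va_end(args);
}
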
Keith Packard42f52ef2008-10-18 19:39:29 -07002734/* Called from drm generic code, passed 'crtc' which
2735 * we use as a pipe index
2736 */
Thierry Reding88e72712015-09-24 18:35:31 +02002737static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002738{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002739 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002740 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002741
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002742 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002743 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002744 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002745 PIPE_START_VBLANK_INTERRUPT_STATUS);
Keith Packarde9d21d72008-10-16 11:31:38 -07002746 else
Keith Packard7c463582008-11-04 02:03:27 -08002747 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002748 PIPE_VBLANK_INTERRUPT_STATUS);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002749 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002750
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002751 return 0;
2752}
2753
Thierry Reding88e72712015-09-24 18:35:31 +02002754static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002755{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002756 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002757 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002758 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c22013-10-21 18:04:36 +02002759 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002760
Jesse Barnesf796cf82011-04-07 13:58:17 -07002761 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02002762 ilk_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002763 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2764
2765 return 0;
2766}
2767
Thierry Reding88e72712015-09-24 18:35:31 +02002768static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002769{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002770 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002771 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002772
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002773 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002774 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002775 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002776 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2777
2778 return 0;
2779}
2780
Thierry Reding88e72712015-09-24 18:35:31 +02002781static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002782{
2783 struct drm_i915_private *dev_priv = dev->dev_private;
2784 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002785
Ben Widawskyabd58f02013-11-02 21:07:09 -07002786 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002787 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002788 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002789
Ben Widawskyabd58f02013-11-02 21:07:09 -07002790 return 0;
2791}
2792
Keith Packard42f52ef2008-10-18 19:39:29 -07002793/* Called from drm generic code, passed 'crtc' which
2794 * we use as a pipe index
2795 */
Thierry Reding88e72712015-09-24 18:35:31 +02002796static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002797{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002798 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002799 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002800
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002801 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002802 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002803 PIPE_VBLANK_INTERRUPT_STATUS |
2804 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002805 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2806}
2807
Thierry Reding88e72712015-09-24 18:35:31 +02002808static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002809{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002810 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002811 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002812 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c22013-10-21 18:04:36 +02002813 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002814
2815 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02002816 ilk_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002817 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2818}
2819
Thierry Reding88e72712015-09-24 18:35:31 +02002820static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002821{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002822 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002823 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002824
2825 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002826 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002827 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002828 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2829}
2830
Thierry Reding88e72712015-09-24 18:35:31 +02002831static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002832{
2833 struct drm_i915_private *dev_priv = dev->dev_private;
2834 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002835
Ben Widawskyabd58f02013-11-02 21:07:09 -07002836 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002837 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002838 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2839}
2840
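/*
 * Illustrative sketch, not part of the driver: every enable/disable pair
 * above has the same shape - take irq_lock with interrupts saved, flip the
 * platform-specific vblank bit, release. The helper below shows the gen8
 * flavour; the other variants differ only in which register/bit they touch.
 * sketch_set_vblank() is a hypothetical name.
 */
static void __maybe_unused
sketch_set_vblank(struct drm_i915_private *dev_priv, enum pipe pipe,
		  bool enable)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (enable)
		bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	else
		bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
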
Chris Wilson9107e9d2013-06-10 11:20:20 +01002841static bool
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002842ring_idle(struct intel_engine_cs *engine, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002843{
Chris Wilsoncffa7812016-04-07 07:29:18 +01002844 return i915_seqno_passed(seqno,
2845 READ_ONCE(engine->last_submitted_seqno));
Ben Gamarif65d9422009-09-14 17:48:44 -04002846}
2847
Daniel Vettera028c4b2014-03-15 00:08:56 +01002848static bool
Chris Wilsonc0336662016-05-06 15:40:21 +01002849ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
Daniel Vettera028c4b2014-03-15 00:08:56 +01002850{
Chris Wilsonc0336662016-05-06 15:40:21 +01002851 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002852 return (ipehr >> 23) == 0x1c;
Daniel Vettera028c4b2014-03-15 00:08:56 +01002853 } else {
2854 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2855 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2856 MI_SEMAPHORE_REGISTER);
2857 }
2858}
2859
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002860static struct intel_engine_cs *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002861semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2862 u64 offset)
Daniel Vetter921d42e2014-03-18 10:26:04 +01002863{
Chris Wilsonc0336662016-05-06 15:40:21 +01002864 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002865 struct intel_engine_cs *signaller;
Daniel Vetter921d42e2014-03-18 10:26:04 +01002866
Chris Wilsonc0336662016-05-06 15:40:21 +01002867 if (INTEL_GEN(dev_priv) >= 8) {
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002868 for_each_engine(signaller, dev_priv) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002869 if (engine == signaller)
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002870 continue;
2871
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002872 if (offset == signaller->semaphore.signal_ggtt[engine->id])
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002873 return signaller;
2874 }
Daniel Vetter921d42e2014-03-18 10:26:04 +01002875 } else {
2876 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2877
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002878 for_each_engine(signaller, dev_priv) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002879			if (engine == signaller)
Daniel Vetter921d42e2014-03-18 10:26:04 +01002880 continue;
2881
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002882 if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
Daniel Vetter921d42e2014-03-18 10:26:04 +01002883 return signaller;
2884 }
2885 }
2886
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002887 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002888 engine->id, ipehr, offset);
Daniel Vetter921d42e2014-03-18 10:26:04 +01002889
2890 return NULL;
2891}
2892
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002893static struct intel_engine_cs *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002894semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002895{
Chris Wilsonc0336662016-05-06 15:40:21 +01002896 struct drm_i915_private *dev_priv = engine->i915;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002897 u32 cmd, ipehr, head;
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002898 u64 offset = 0;
2899 int i, backwards;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002900
Tomas Elf381e8ae2015-10-08 19:31:33 +01002901 /*
2902 * This function does not support execlist mode - any attempt to
2903 * proceed further into this function will result in a kernel panic
2904 * when dereferencing ring->buffer, which is not set up in execlist
2905 * mode.
2906 *
2907 * The correct way of doing it would be to derive the currently
2908 * executing ring buffer from the current context, which is derived
2909 * from the currently running request. Unfortunately, to get the
2910 * current request we would have to grab the struct_mutex before doing
2911 * anything else, which would be ill-advised since some other thread
2912 * might have grabbed it already and managed to hang itself, causing
2913 * the hang checker to deadlock.
2914 *
2915 * Therefore, this function does not support execlist mode in its
2916 * current form. Just return NULL and move on.
2917 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002918 if (engine->buffer == NULL)
Tomas Elf381e8ae2015-10-08 19:31:33 +01002919 return NULL;
2920
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002921 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
Chris Wilsonc0336662016-05-06 15:40:21 +01002922 if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
Chris Wilson6274f212013-06-10 11:20:21 +01002923 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002924
Daniel Vetter88fe4292014-03-15 00:08:55 +01002925 /*
2926 * HEAD is likely pointing to the dword after the actual command,
2927 * so scan backwards until we find the MBOX. But limit it to just 3
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002928 * or 4 dwords depending on the semaphore wait command size.
2929 * Note that we don't care about ACTHD here since that might
Daniel Vetter88fe4292014-03-15 00:08:55 +01002930	 * point at a batch, and semaphores are always emitted into the
2931 * ringbuffer itself.
Chris Wilsona24a11e2013-03-14 17:52:05 +02002932 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002933 head = I915_READ_HEAD(engine) & HEAD_ADDR;
Chris Wilsonc0336662016-05-06 15:40:21 +01002934 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002935
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002936 for (i = backwards; i; --i) {
Daniel Vetter88fe4292014-03-15 00:08:55 +01002937 /*
2938 * Be paranoid and presume the hw has gone off into the wild -
2939 * our ring is smaller than what the hardware (and hence
2940 * HEAD_ADDR) allows. Also handles wrap-around.
2941 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002942 head &= engine->buffer->size - 1;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002943
2944 /* This here seems to blow up */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002945 cmd = ioread32(engine->buffer->virtual_start + head);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002946 if (cmd == ipehr)
2947 break;
2948
Daniel Vetter88fe4292014-03-15 00:08:55 +01002949 head -= 4;
2950 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002951
Daniel Vetter88fe4292014-03-15 00:08:55 +01002952 if (!i)
2953 return NULL;
2954
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002955 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
Chris Wilsonc0336662016-05-06 15:40:21 +01002956 if (INTEL_GEN(dev_priv) >= 8) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002957 offset = ioread32(engine->buffer->virtual_start + head + 12);
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002958 offset <<= 32;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002959 offset = ioread32(engine->buffer->virtual_start + head + 8);
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002960 }
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002961 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002962}
2963
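/*
 * Illustrative sketch, not part of the driver: the backwards HEAD scan
 * above in miniature. Masking against size - 1 on every step handles
 * wrap-around for free on a power-of-two ring, and also guards against a
 * wild HEAD value. Plain memory stands in for the ioread32() accesses;
 * all names are hypothetical.
 */
static int __maybe_unused
sketch_scan_backwards(const u32 *ring, u32 size, u32 head, u32 needle,
		      int limit)
{
	int i;

	for (i = limit; i; --i) {
		head &= size - 1;	/* size must be a power of two */

		if (ring[head / 4] == needle)
			return head;

		head -= 4;	/* one dword back; may wrap below zero */
	}

	return -1;
}
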
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002964static int semaphore_passed(struct intel_engine_cs *engine)
Chris Wilson6274f212013-06-10 11:20:21 +01002965{
Chris Wilsonc0336662016-05-06 15:40:21 +01002966 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002967 struct intel_engine_cs *signaller;
Chris Wilsona0d036b2014-07-19 12:40:42 +01002968 u32 seqno;
Chris Wilson6274f212013-06-10 11:20:21 +01002969
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002970 engine->hangcheck.deadlock++;
Chris Wilson6274f212013-06-10 11:20:21 +01002971
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002972 signaller = semaphore_waits_for(engine, &seqno);
Chris Wilson4be17382014-06-06 10:22:29 +01002973 if (signaller == NULL)
2974 return -1;
2975
2976 /* Prevent pathological recursion due to driver bugs */
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00002977 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
Chris Wilson6274f212013-06-10 11:20:21 +01002978 return -1;
2979
Chris Wilsonc04e0f32016-04-09 10:57:54 +01002980 if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
Chris Wilson4be17382014-06-06 10:22:29 +01002981 return 1;
2982
Chris Wilsona0d036b2014-07-19 12:40:42 +01002983 /* cursory check for an unkickable deadlock */
2984 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2985 semaphore_passed(signaller) < 0)
Chris Wilson4be17382014-06-06 10:22:29 +01002986 return -1;
2987
2988 return 0;
Chris Wilson6274f212013-06-10 11:20:21 +01002989}
2990
2991static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2992{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002993 struct intel_engine_cs *engine;
Chris Wilson6274f212013-06-10 11:20:21 +01002994
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002995 for_each_engine(engine, dev_priv)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002996 engine->hangcheck.deadlock = 0;
Chris Wilson6274f212013-06-10 11:20:21 +01002997}
2998
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002999static bool subunits_stuck(struct intel_engine_cs *engine)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003000{
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003001 u32 instdone[I915_NUM_INSTDONE_REG];
3002 bool stuck;
3003 int i;
Chris Wilson9107e9d2013-06-10 11:20:20 +01003004
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003005 if (engine->id != RCS)
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003006 return true;
3007
Chris Wilsonc0336662016-05-06 15:40:21 +01003008 i915_get_extra_instdone(engine->i915, instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003009
3010 /* There might be unstable subunit states even when
3011 * actual head is not moving. Filter out the unstable ones by
3012 * accumulating the undone -> done transitions and only
3013 * consider those as progress.
3014 */
3015 stuck = true;
3016 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003017 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003018
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003019 if (tmp != engine->hangcheck.instdone[i])
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003020 stuck = false;
3021
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003022 engine->hangcheck.instdone[i] |= tmp;
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003023 }
3024
3025 return stuck;
3026}
3027
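/*
 * Illustrative sketch, not part of the driver: the filtering idea in
 * subunits_stuck() above, for a single status word. Individual subunit
 * bits may flicker, so progress is only believed when a sample adds an
 * undone->done transition we had not yet accumulated. The helper name is
 * hypothetical.
 */
static bool __maybe_unused sketch_accumulate_progress(u32 *seen, u32 sample)
{
	u32 tmp = sample | *seen;
	bool progress = tmp != *seen;	/* any newly-done bit? */

	*seen = tmp;
	return progress;
}
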
3028static enum intel_ring_hangcheck_action
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003029head_stuck(struct intel_engine_cs *engine, u64 acthd)
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003030{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003031 if (acthd != engine->hangcheck.acthd) {
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003032
3033 /* Clear subunit states on head movement */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003034 memset(engine->hangcheck.instdone, 0,
3035 sizeof(engine->hangcheck.instdone));
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003036
Mika Kuoppala24a65e62016-03-02 16:48:29 +02003037 return HANGCHECK_ACTIVE;
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03003038 }
Chris Wilson6274f212013-06-10 11:20:21 +01003039
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003040 if (!subunits_stuck(engine))
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003041 return HANGCHECK_ACTIVE;
3042
3043 return HANGCHECK_HUNG;
3044}
3045
3046static enum intel_ring_hangcheck_action
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003047ring_stuck(struct intel_engine_cs *engine, u64 acthd)
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003048{
Chris Wilsonc0336662016-05-06 15:40:21 +01003049 struct drm_i915_private *dev_priv = engine->i915;
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003050 enum intel_ring_hangcheck_action ha;
3051 u32 tmp;
3052
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003053 ha = head_stuck(engine, acthd);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003054 if (ha != HANGCHECK_HUNG)
3055 return ha;
3056
Chris Wilsonc0336662016-05-06 15:40:21 +01003057 if (IS_GEN2(dev_priv))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003058 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01003059
3060 /* Is the chip hanging on a WAIT_FOR_EVENT?
3061 * If so we can simply poke the RB_WAIT bit
3062 * and break the hang. This should work on
3063 * all but the second generation chipsets.
3064 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003065 tmp = I915_READ_CTL(engine);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003066 if (tmp & RING_WAIT) {
Chris Wilsonc0336662016-05-06 15:40:21 +01003067 i915_handle_error(dev_priv, 0,
Mika Kuoppala58174462014-02-25 17:11:26 +02003068 "Kicking stuck wait on %s",
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003069 engine->name);
3070 I915_WRITE_CTL(engine, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003071 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003072 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02003073
Chris Wilsonc0336662016-05-06 15:40:21 +01003074 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003075 switch (semaphore_passed(engine)) {
Chris Wilson6274f212013-06-10 11:20:21 +01003076 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003077 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01003078 case 1:
Chris Wilsonc0336662016-05-06 15:40:21 +01003079 i915_handle_error(dev_priv, 0,
Mika Kuoppala58174462014-02-25 17:11:26 +02003080 "Kicking stuck semaphore on %s",
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003081 engine->name);
3082 I915_WRITE_CTL(engine, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003083 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01003084 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003085 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01003086 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01003087 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03003088
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003089 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03003090}
3091
Chris Wilson12471ba2016-04-09 10:57:55 +01003092static unsigned kick_waiters(struct intel_engine_cs *engine)
3093{
Chris Wilsonc0336662016-05-06 15:40:21 +01003094 struct drm_i915_private *i915 = engine->i915;
Chris Wilson12471ba2016-04-09 10:57:55 +01003095 unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
3096
3097 if (engine->hangcheck.user_interrupts == user_interrupts &&
3098 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
3099 if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
3100 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3101 engine->name);
3102 else
3103 DRM_INFO("Fake missed irq on %s\n",
3104 engine->name);
3105 wake_up_all(&engine->irq_queue);
3106 }
3107
3108 return user_interrupts;
3109}

Chris Wilson737b1502015-01-26 18:03:03 +02003110/*
Ben Gamarif65d9422009-09-14 17:48:44 -04003111 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03003112 * batchbuffers in a long time. We keep track of seqno progress per ring and,
3113 * if there is no progress, the hangcheck score for that ring is increased.
3114 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
3115 * we kick the ring. If we see no progress on three subsequent calls
3116 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04003117 */
Chris Wilson737b1502015-01-26 18:03:03 +02003118static void i915_hangcheck_elapsed(struct work_struct *work)
Ben Gamarif65d9422009-09-14 17:48:44 -04003119{
Chris Wilson737b1502015-01-26 18:03:03 +02003120 struct drm_i915_private *dev_priv =
3121 container_of(work, typeof(*dev_priv),
3122 gpu_error.hangcheck_work.work);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003123 struct intel_engine_cs *engine;
Dave Gordonc3232b12016-03-23 18:19:53 +00003124 enum intel_engine_id id;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03003125 int busy_count = 0, rings_hung = 0;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003126 bool stuck[I915_NUM_ENGINES] = { 0 };
Chris Wilson9107e9d2013-06-10 11:20:20 +01003127#define BUSY 1
3128#define KICK 5
3129#define HUNG 20
Mika Kuoppala24a65e62016-03-02 16:48:29 +02003130#define ACTIVE_DECAY 15
Chris Wilson893eead2010-10-27 14:44:35 +01003131
Jani Nikulad330a952014-01-21 11:24:25 +02003132 if (!i915.enable_hangcheck)
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07003133 return;
3134
Imre Deak1f814da2015-12-16 02:52:19 +02003135 /*
3136	 * The hangcheck work is synced during runtime suspend, so we don't
3137 * require a wakeref. TODO: instead of disabling the asserts make
3138 * sure that we hold a reference when this work is running.
3139 */
3140 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3141
Mika Kuoppala75714942015-12-16 09:26:48 +02003142 /* As enabling the GPU requires fairly extensive mmio access,
3143 * periodically arm the mmio checker to see if we are triggering
3144 * any invalid access.
3145 */
3146 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3147
Dave Gordonc3232b12016-03-23 18:19:53 +00003148 for_each_engine_id(engine, dev_priv, id) {
Chris Wilson50877442014-03-21 12:41:53 +00003149 u64 acthd;
3150 u32 seqno;
Chris Wilson12471ba2016-04-09 10:57:55 +01003151 unsigned user_interrupts;
Chris Wilson9107e9d2013-06-10 11:20:20 +01003152 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01003153
Chris Wilson6274f212013-06-10 11:20:21 +01003154 semaphore_clear_deadlocks(dev_priv);
3155
Chris Wilsonc04e0f32016-04-09 10:57:54 +01003156 /* We don't strictly need an irq-barrier here, as we are not
3157 * serving an interrupt request, be paranoid in case the
3158 * barrier has side-effects (such as preventing a broken
3159 * cacheline snoop) and so be sure that we can see the seqno
3160 * advance. If the seqno should stick, due to a stale
3161 * cacheline, we would erroneously declare the GPU hung.
3162 */
3163 if (engine->irq_seqno_barrier)
3164 engine->irq_seqno_barrier(engine);
3165
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003166 acthd = intel_ring_get_active_head(engine);
Chris Wilsonc04e0f32016-04-09 10:57:54 +01003167 seqno = engine->get_seqno(engine);
Chris Wilsond1e61e72012-04-10 17:00:41 +01003168
Chris Wilson12471ba2016-04-09 10:57:55 +01003169 /* Reset stuck interrupts between batch advances */
3170 user_interrupts = 0;
3171
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003172 if (engine->hangcheck.seqno == seqno) {
3173 if (ring_idle(engine, seqno)) {
3174 engine->hangcheck.action = HANGCHECK_IDLE;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003175 if (waitqueue_active(&engine->irq_queue)) {
Chris Wilson094f9a52013-09-25 17:34:55 +01003176 /* Safeguard against driver failure */
Chris Wilson12471ba2016-04-09 10:57:55 +01003177 user_interrupts = kick_waiters(engine);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003178 engine->hangcheck.score += BUSY;
Chris Wilson9107e9d2013-06-10 11:20:20 +01003179 } else
3180 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03003181 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01003182 /* We always increment the hangcheck score
3183 * if the ring is busy and still processing
3184 * the same request, so that no single request
3185 * can run indefinitely (such as a chain of
3186 * batches). The only time we do not increment
3187				 * the hangcheck score on this ring is if this
3188 * ring is in a legitimate wait for another
3189 * ring. In that case the waiting ring is a
3190 * victim and we want to be sure we catch the
3191 * right culprit. Then every time we do kick
3192 * the ring, add a small increment to the
3193 * score so that we can catch a batch that is
3194 * being repeatedly kicked and so responsible
3195 * for stalling the machine.
3196 */
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003197 engine->hangcheck.action = ring_stuck(engine,
3198 acthd);
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03003199
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003200 switch (engine->hangcheck.action) {
Mika Kuoppalada661462013-09-06 16:03:28 +03003201 case HANGCHECK_IDLE:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003202 case HANGCHECK_WAIT:
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03003203 break;
Mika Kuoppala24a65e62016-03-02 16:48:29 +02003204 case HANGCHECK_ACTIVE:
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003205 engine->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01003206 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003207 case HANGCHECK_KICK:
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003208 engine->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01003209 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03003210 case HANGCHECK_HUNG:
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003211 engine->hangcheck.score += HUNG;
Dave Gordonc3232b12016-03-23 18:19:53 +00003212 stuck[id] = true;
Chris Wilson6274f212013-06-10 11:20:21 +01003213 break;
3214 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03003215 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01003216 } else {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003217 engine->hangcheck.action = HANGCHECK_ACTIVE;
Mika Kuoppalada661462013-09-06 16:03:28 +03003218
Chris Wilson9107e9d2013-06-10 11:20:20 +01003219 /* Gradually reduce the count so that we catch DoS
3220 * attempts across multiple batches.
3221 */
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003222 if (engine->hangcheck.score > 0)
3223 engine->hangcheck.score -= ACTIVE_DECAY;
3224 if (engine->hangcheck.score < 0)
3225 engine->hangcheck.score = 0;
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03003226
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003227 /* Clear head and subunit states on seqno movement */
Chris Wilson12471ba2016-04-09 10:57:55 +01003228 acthd = 0;
Mika Kuoppala61642ff2015-12-01 17:56:12 +02003229
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003230 memset(engine->hangcheck.instdone, 0,
3231 sizeof(engine->hangcheck.instdone));
Chris Wilsond1e61e72012-04-10 17:00:41 +01003232 }
3233
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003234 engine->hangcheck.seqno = seqno;
3235 engine->hangcheck.acthd = acthd;
Chris Wilson12471ba2016-04-09 10:57:55 +01003236 engine->hangcheck.user_interrupts = user_interrupts;
Chris Wilson9107e9d2013-06-10 11:20:20 +01003237 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01003238 }
Eric Anholtb9201c12010-01-08 14:25:16 -08003239
Dave Gordonc3232b12016-03-23 18:19:53 +00003240 for_each_engine_id(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003241 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
Daniel Vetterb8d88d12013-08-28 10:57:59 +02003242 DRM_INFO("%s on %s\n",
Dave Gordonc3232b12016-03-23 18:19:53 +00003243 stuck[id] ? "stuck" : "no progress",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003244 engine->name);
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00003245 rings_hung |= intel_engine_flag(engine);
Mika Kuoppala92cab732013-05-24 17:16:07 +03003246 }
3247 }
3248
Imre Deak1f814da2015-12-16 02:52:19 +02003249 if (rings_hung) {
Chris Wilsonc0336662016-05-06 15:40:21 +01003250 i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
Imre Deak1f814da2015-12-16 02:52:19 +02003251 goto out;
3252 }
Ben Gamarif65d9422009-09-14 17:48:44 -04003253
Mika Kuoppala05407ff2013-05-30 09:04:29 +03003254 if (busy_count)
3255		/* Reset timer in case the chip hangs without another
3256		 * request being added */
Chris Wilsonc0336662016-05-06 15:40:21 +01003257 i915_queue_hangcheck(dev_priv);
Imre Deak1f814da2015-12-16 02:52:19 +02003258
3259out:
3260 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03003261}
3262
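/*
 * Illustrative sketch, not part of the driver: the scoring policy of
 * i915_hangcheck_elapsed() above reduced to one integer per ring. Scores
 * climb while a ring is busy without advancing (faster when it also looks
 * stuck), decay while it makes progress, and the ring is declared hung once
 * the threshold is crossed. The helper is hypothetical; the constants are
 * the ones defined above.
 */
static bool __maybe_unused
sketch_score_ring(int *score, bool busy, bool progressed, bool stuck)
{
	if (!busy)
		return false;

	if (progressed) {
		*score = max(*score - ACTIVE_DECAY, 0);	/* forgive slowly */
		return false;
	}

	*score += stuck ? HUNG : BUSY;
	return *score >= HANGCHECK_SCORE_RING_HUNG;
}
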
Chris Wilsonc0336662016-05-06 15:40:21 +01003263void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03003264{
Chris Wilsonc0336662016-05-06 15:40:21 +01003265 struct i915_gpu_error *e = &dev_priv->gpu_error;
Chris Wilson672e7b72014-11-19 09:47:19 +00003266
Jani Nikulad330a952014-01-21 11:24:25 +02003267 if (!i915.enable_hangcheck)
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03003268 return;
3269
Chris Wilson737b1502015-01-26 18:03:03 +02003270 /* Don't continually defer the hangcheck so that it is always run at
3271 * least once after work has been scheduled on any ring. Otherwise,
3272 * we will ignore a hung ring if a second ring is kept busy.
3273 */
3274
3275 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3276 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04003277}
3278
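/*
 * Illustrative sketch, not part of the driver: why i915_queue_hangcheck()
 * uses queue_delayed_work() rather than mod_delayed_work(). Queueing is a
 * no-op while the work is already pending, so an armed check still fires
 * on schedule; re-arming the timer on every new request could defer the
 * check indefinitely on a busy system. "wq" and "work" are hypothetical.
 */
#if 0
	queue_delayed_work(wq, &work, delay);	/* no-op if already pending */
	mod_delayed_work(wq, &work, delay);	/* re-arms, deferring the run */
#endif
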
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03003279static void ibx_irq_reset(struct drm_device *dev)
Paulo Zanoni91738a92013-06-05 14:21:51 -03003280{
3281 struct drm_i915_private *dev_priv = dev->dev_private;
3282
3283 if (HAS_PCH_NOP(dev))
3284 return;
3285
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003286 GEN5_IRQ_RESET(SDE);
Paulo Zanoni105b1222014-04-01 15:37:17 -03003287
3288 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3289 I915_WRITE(SERR_INT, 0xffffffff);
Paulo Zanoni622364b2014-04-01 15:37:22 -03003290}
Paulo Zanoni105b1222014-04-01 15:37:17 -03003291
Paulo Zanoni622364b2014-04-01 15:37:22 -03003292/*
3293 * SDEIER is also touched by the interrupt handler to work around missed PCH
3294 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3295 * instead we unconditionally enable all PCH interrupt sources here, but then
3296 * only unmask them as needed with SDEIMR.
3297 *
3298 * This function needs to be called before interrupts are enabled.
3299 */
3300static void ibx_irq_pre_postinstall(struct drm_device *dev)
3301{
3302 struct drm_i915_private *dev_priv = dev->dev_private;
3303
3304 if (HAS_PCH_NOP(dev))
3305 return;
3306
3307 WARN_ON(I915_READ(SDEIER) != 0);
Paulo Zanoni91738a92013-06-05 14:21:51 -03003308 I915_WRITE(SDEIER, 0xffffffff);
3309 POSTING_READ(SDEIER);
3310}
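
/*
 * For illustration (a hedged sketch, not this file's authoritative
 * flow): the missed-interrupt workaround referenced above means the IRQ
 * handler is assumed to bracket PCH processing roughly like
 *
 *	sde_ier = I915_READ(SDEIER);
 *	I915_WRITE(SDEIER, 0);
 *	... service SDEIIR sources ...
 *	I915_WRITE(SDEIER, sde_ier);
 *
 * which is why SDEIER must already be fully enabled at this point.
 */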

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}
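
/*
 * Hedged note on the reset macros: GEN5_IRQ_RESET(type) is assumed to
 * expand to roughly
 *
 *	I915_WRITE(type##IMR, 0xffffffff);
 *	I915_WRITE(type##IER, 0);
 *	I915_WRITE(type##IIR, 0xffffffff);
 *	I915_WRITE(type##IIR, 0xffffffff);
 *
 * i.e. mask everything, disable everything, then clear IIR twice since
 * it can queue up a second event behind the first.
 */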

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}

	GEN5_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}
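
/*
 * Companion note to the reset sketch earlier (same hedging applies):
 * GEN5_IRQ_INIT(type, imr, ier) is assumed to verify IIR is already
 * zero and then program the mask/enable pair, roughly
 *
 *	gen5_assert_iir_is_zero(dev_priv, type##IIR);
 *	I915_WRITE(type##IER, ier);
 *	I915_WRITE(type##IMR, imr);
 *	POSTING_READ(type##IMR);
 */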

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->dev->irq);
}
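
/*
 * Descriptive note: the pair above is deliberately asymmetric. On
 * power-well enable the pipe IRQ registers are reprogrammed under
 * irq_lock; before disable they are reset and then synchronize_irq()
 * waits out any handler still running on another CPU, so the well can
 * be powered off without the handler touching dead registers.
 */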

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(dev_priv->dev, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev_priv);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		   PORTA_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * On BXT the HPD invert bit has to be set according to the AOB
	 * design of the detection logic, so update it from the VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
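
/*
 * Descriptive note on the table above: each gt_interrupts[i] is
 * programmed into the i-th GT IMR/IER pair via GEN8_IRQ_INIT_NDX, so
 * slot 0 covers render + blitter, slot 1 the two video decode rings,
 * slot 2 the PM/RPS events (left fully masked here) and slot 3 the
 * video enhancement ring.
 */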

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}
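
/*
 * Illustrative timeline (a sketch of the FlipDone detection described
 * above, not additional driver state):
 *
 *	MI_DISPLAY_FLIP emitted:  ISR PendingFlip = 1, IIR latches 1
 *	flip actually completes:  ISR PendingFlip = 0, IIR still 1
 *	next vblank interrupt:    IIR set && ISR clear -> finish the flip
 */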

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
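
/*
 * Distilled shape of the loop above (a sketch for reference only; the
 * real loop also carries flip_mask handling): MSI interrupts fire only
 * on IIR's 0 -> nonzero transition, so the handler must ack and then
 * re-read IIR until it stays clear:
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir);		// ack what we saw
 *		new_iir = I915_READ(IIR);	// pick up late arrivals
 *		... service iir ...
 *		iir = new_iir;
 *	} while (iir);
 */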
4303
4304static void i915_irq_uninstall(struct drm_device * dev)
4305{
Jani Nikula2d1013d2014-03-31 14:27:17 +03004306 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004307 int pipe;
4308
Chris Wilsona266c7d2012-04-24 22:59:44 +01004309 if (I915_HAS_HOTPLUG(dev)) {
Egbert Eich0706f172015-09-23 16:15:27 +02004310 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004311 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4312 }
4313
Chris Wilson00d98eb2012-04-24 22:59:48 +01004314 I915_WRITE16(HWSTAM, 0xffff);
Damien Lespiau055e3932014-08-18 13:49:10 +01004315 for_each_pipe(dev_priv, pipe) {
Chris Wilson55b39752012-04-24 22:59:49 +01004316 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01004317 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01004318 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4319 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01004320 I915_WRITE(IMR, 0xffffffff);
4321 I915_WRITE(IER, 0x0);
4322
Chris Wilsona266c7d2012-04-24 22:59:44 +01004323 I915_WRITE(IIR, I915_READ(IIR));
4324}
4325
4326static void i965_irq_preinstall(struct drm_device * dev)
4327{
Jani Nikula2d1013d2014-03-31 14:27:17 +03004328 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004329 int pipe;
4330
Egbert Eich0706f172015-09-23 16:15:27 +02004331 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Chris Wilsonadca4732012-05-11 18:01:31 +01004332 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01004333
4334 I915_WRITE(HWSTAM, 0xeffe);
Damien Lespiau055e3932014-08-18 13:49:10 +01004335 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004336 I915_WRITE(PIPESTAT(pipe), 0);
4337 I915_WRITE(IMR, 0xffffffff);
4338 I915_WRITE(IER, 0x0);
4339 POSTING_READ(IER);
4340}
4341
4342static int i965_irq_postinstall(struct drm_device *dev)
4343{
Jani Nikula2d1013d2014-03-31 14:27:17 +03004344 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004345 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004346 u32 error_mask;
4347
Chris Wilsona266c7d2012-04-24 22:59:44 +01004348 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004349 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01004350 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004351 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4352 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4353 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4354 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4355 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4356
4357 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02004358 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4359 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004360 enable_mask |= I915_USER_INTERRUPT;
4361
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004362 if (IS_G4X(dev_priv))
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004363 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004364
Daniel Vetterb79480b2013-06-27 17:52:10 +02004365 /* Interrupt setup is already guaranteed to be single-threaded, this is
4366 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02004367 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02004368 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4369 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4370 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02004371 spin_unlock_irq(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004372
Chris Wilsona266c7d2012-04-24 22:59:44 +01004373 /*
4374 * Enable some error detection, note the instruction error mask
4375 * bit is reserved, so we leave it masked.
4376 */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004377 if (IS_G4X(dev_priv)) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004378 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4379 GM45_ERROR_MEM_PRIV |
4380 GM45_ERROR_CP_PRIV |
4381 I915_ERROR_MEMORY_REFRESH);
4382 } else {
4383 error_mask = ~(I915_ERROR_PAGE_TABLE |
4384 I915_ERROR_MEMORY_REFRESH);
4385 }
4386 I915_WRITE(EMR, error_mask);
4387
4388 I915_WRITE(IMR, dev_priv->irq_mask);
4389 I915_WRITE(IER, enable_mask);
4390 POSTING_READ(IER);
4391
Egbert Eich0706f172015-09-23 16:15:27 +02004392 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004393 POSTING_READ(PORT_HOTPLUG_EN);
4394
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01004395 i915_enable_asle_pipestat(dev_priv);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004396
4397 return 0;
4398}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
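
/*
 * Illustrative sketch (not part of this file): because of the
 * assert_spin_locked() above, callers are expected to invoke the
 * hpd_irq_setup hook with the irq_lock held, e.g.:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	if (dev_priv->display.hpd_irq_setup)
 *		dev_priv->display.hpd_irq_setup(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 *
 * which is roughly what the hotplug init/reset paths are expected to do.
 */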

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
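
/*
 * Illustrative sketch (not driver code): stripped of the per-source
 * handling, the MSI-safe loop above reduces to the canonical
 * read/ack/re-read pattern:
 *
 *	iir = I915_READ(IIR);
 *	while (iir & ~flip_mask) {
 *		I915_WRITE(IIR, iir & ~flip_mask);	// ack what was seen
 *		new_iir = I915_READ(IIR);	// flush write, pick up new bits
 *		... handle the bits latched in iir ...
 *		iir = new_iir;
 *	}
 *
 * The flip-pending bits are deliberately left set in IIR until
 * i915_handle_vblank() has completed the corresponding page flip, at
 * which point they are dropped from flip_mask and acked on the next
 * IIR write.
 */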

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
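
/*
 * Reviewer note (illustrative): the 0x8000ffff mask used here and in the
 * interrupt handler selects only the write-1-to-clear status bits of
 * PIPESTAT - roughly bits 15:0 plus the FIFO underrun status in bit 31 -
 * while leaving the interrupt enable bits in the upper half untouched.
 * That lets the teardown above first disable all sources and then ack any
 * residual events, so a later postinstall starts from a clean state.
 */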

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
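
/*
 * Illustrative sketch (not driver code): a load path is expected to pair
 * the two stages roughly like this (the error handling shown is
 * hypothetical):
 *
 *	intel_irq_init(dev_priv);		// vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);	// request the actual IRQ
 *	if (ret)
 *		goto cleanup_irq;
 *
 * with intel_irq_uninstall() undoing both stages on the unload path.
 */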

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
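
/*
 * Illustrative sketch (not driver code): the two runtime helpers above are
 * meant to be used as a symmetric pair around a low-power state, roughly:
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// entering suspend
 *	...
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// on resume
 *
 * Note the disable side also calls synchronize_irq() so that any handler
 * still running on another CPU has finished before the device is powered
 * down.
 */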