/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

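/*
 * Usage sketch (illustrative only; gt_irqs stands for whatever enable bits a
 * given platform computes): the reset and init macros above are meant to be
 * used in matching pairs per interrupt domain, roughly
 *
 *	GEN5_IRQ_RESET(GT);					(preinstall/uninstall)
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);	(postinstall)
 *
 * so that the IIR-is-zero assertion in the init path catches anything that
 * managed to fire while the source was supposed to be fully masked off.
 */
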
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can (and VLV,CHV may) hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
315 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
316 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
317
318 if (INTEL_INFO(dev_priv)->gen >= 8)
319 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
320
321 return mask;
322}
323
Imre Deakb900b942014-11-05 20:48:48 +0200324void gen6_disable_rps_interrupts(struct drm_device *dev)
325{
326 struct drm_i915_private *dev_priv = dev->dev_private;
327
Imre Deakd4d70aa2014-11-19 15:30:04 +0200328 spin_lock_irq(&dev_priv->irq_lock);
329 dev_priv->rps.interrupts_enabled = false;
330 spin_unlock_irq(&dev_priv->irq_lock);
331
332 cancel_work_sync(&dev_priv->rps.work);
333
Imre Deak9939fba2014-11-20 23:01:47 +0200334 spin_lock_irq(&dev_priv->irq_lock);
335
Imre Deak59d02a12014-12-19 19:33:26 +0200336 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
Imre Deak9939fba2014-11-20 23:01:47 +0200337
338 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
Imre Deakb900b942014-11-05 20:48:48 +0200339 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
340 ~dev_priv->pm_rps_events);
Imre Deak58072cc2015-03-23 19:11:34 +0200341
342 spin_unlock_irq(&dev_priv->irq_lock);
343
344 synchronize_irq(dev->irq);
Imre Deakb900b942014-11-05 20:48:48 +0200345}
346
Ben Widawsky09610212014-05-15 20:58:08 +0300347/**
Daniel Vetterfee884e2013-07-04 23:35:21 +0200348 * ibx_display_interrupt_update - update SDEIMR
349 * @dev_priv: driver private
350 * @interrupt_mask: mask of interrupt bits to update
351 * @enabled_irq_mask: mask of interrupt bits to enable
352 */
Daniel Vetter47339cd2014-09-30 10:56:46 +0200353void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
354 uint32_t interrupt_mask,
355 uint32_t enabled_irq_mask)
Daniel Vetterfee884e2013-07-04 23:35:21 +0200356{
357 uint32_t sdeimr = I915_READ(SDEIMR);
358 sdeimr &= ~interrupt_mask;
359 sdeimr |= (~enabled_irq_mask & interrupt_mask);
360
Daniel Vetter15a17aa2014-12-08 16:30:00 +0100361 WARN_ON(enabled_irq_mask & ~interrupt_mask);
362
Daniel Vetterfee884e2013-07-04 23:35:21 +0200363 assert_spin_locked(&dev_priv->irq_lock);
364
Jesse Barnes9df7575f2014-06-20 09:29:20 -0700365 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
Paulo Zanonic67a4702013-08-19 13:18:09 -0300366 return;
Paulo Zanonic67a4702013-08-19 13:18:09 -0300367
Daniel Vetterfee884e2013-07-04 23:35:21 +0200368 I915_WRITE(SDEIMR, sdeimr);
369 POSTING_READ(SDEIMR);
370}
Paulo Zanoni86642812013-04-12 17:57:57 -0300371
Daniel Vetterb5ea6422014-03-02 21:18:00 +0100372static void
Imre Deak755e9012014-02-10 18:42:47 +0200373__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
374 u32 enable_mask, u32 status_mask)
Keith Packard7c463582008-11-04 02:03:27 -0800375{
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200376 u32 reg = PIPESTAT(pipe);
Imre Deak755e9012014-02-10 18:42:47 +0200377 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
Keith Packard7c463582008-11-04 02:03:27 -0800378
Daniel Vetterb79480b2013-06-27 17:52:10 +0200379 assert_spin_locked(&dev_priv->irq_lock);
Daniel Vetterd518ce52014-08-27 10:43:37 +0200380 WARN_ON(!intel_irqs_enabled(dev_priv));
Daniel Vetterb79480b2013-06-27 17:52:10 +0200381
Ville Syrjälä04feced2014-04-03 13:28:33 +0300382 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
383 status_mask & ~PIPESTAT_INT_STATUS_MASK,
384 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
385 pipe_name(pipe), enable_mask, status_mask))
Imre Deak755e9012014-02-10 18:42:47 +0200386 return;
387
388 if ((pipestat & enable_mask) == enable_mask)
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200389 return;
390
Imre Deak91d181d2014-02-10 18:42:49 +0200391 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
392
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200393 /* Enable the interrupt, clear any pending status */
Imre Deak755e9012014-02-10 18:42:47 +0200394 pipestat |= enable_mask | status_mask;
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200395 I915_WRITE(reg, pipestat);
396 POSTING_READ(reg);
Keith Packard7c463582008-11-04 02:03:27 -0800397}
398
Daniel Vetterb5ea6422014-03-02 21:18:00 +0100399static void
Imre Deak755e9012014-02-10 18:42:47 +0200400__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
401 u32 enable_mask, u32 status_mask)
Keith Packard7c463582008-11-04 02:03:27 -0800402{
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200403 u32 reg = PIPESTAT(pipe);
Imre Deak755e9012014-02-10 18:42:47 +0200404 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
Keith Packard7c463582008-11-04 02:03:27 -0800405
Daniel Vetterb79480b2013-06-27 17:52:10 +0200406 assert_spin_locked(&dev_priv->irq_lock);
Daniel Vetterd518ce52014-08-27 10:43:37 +0200407 WARN_ON(!intel_irqs_enabled(dev_priv));
Daniel Vetterb79480b2013-06-27 17:52:10 +0200408
Ville Syrjälä04feced2014-04-03 13:28:33 +0300409 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
410 status_mask & ~PIPESTAT_INT_STATUS_MASK,
411 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
412 pipe_name(pipe), enable_mask, status_mask))
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200413 return;
414
Imre Deak755e9012014-02-10 18:42:47 +0200415 if ((pipestat & enable_mask) == 0)
416 return;
417
Imre Deak91d181d2014-02-10 18:42:49 +0200418 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
419
Imre Deak755e9012014-02-10 18:42:47 +0200420 pipestat &= ~enable_mask;
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200421 I915_WRITE(reg, pipestat);
422 POSTING_READ(reg);
Keith Packard7c463582008-11-04 02:03:27 -0800423}
424
Imre Deak10c59c52014-02-10 18:42:48 +0200425static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
426{
427 u32 enable_mask = status_mask << 16;
428
429 /*
Ville Syrjälä724a6902014-04-09 13:28:48 +0300430 * On pipe A we don't support the PSR interrupt yet,
431 * on pipe B and C the same bit MBZ.
Imre Deak10c59c52014-02-10 18:42:48 +0200432 */
433 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
434 return 0;
Ville Syrjälä724a6902014-04-09 13:28:48 +0300435 /*
436 * On pipe B and C we don't support the PSR interrupt yet, on pipe
437 * A the same bit is for perf counters which we don't use either.
438 */
439 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
440 return 0;
Imre Deak10c59c52014-02-10 18:42:48 +0200441
442 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
443 SPRITE0_FLIP_DONE_INT_EN_VLV |
444 SPRITE1_FLIP_DONE_INT_EN_VLV);
445 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
446 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
447 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
448 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
449
450 return enable_mask;
451}
452
Imre Deak755e9012014-02-10 18:42:47 +0200453void
454i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
455 u32 status_mask)
456{
457 u32 enable_mask;
458
Imre Deak10c59c52014-02-10 18:42:48 +0200459 if (IS_VALLEYVIEW(dev_priv->dev))
460 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
461 status_mask);
462 else
463 enable_mask = status_mask << 16;
Imre Deak755e9012014-02-10 18:42:47 +0200464 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
465}
466
467void
468i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
469 u32 status_mask)
470{
471 u32 enable_mask;
472
Imre Deak10c59c52014-02-10 18:42:48 +0200473 if (IS_VALLEYVIEW(dev_priv->dev))
474 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
475 status_mask);
476 else
477 enable_mask = status_mask << 16;
Imre Deak755e9012014-02-10 18:42:47 +0200478 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
479}
480
=?utf-8?q?Michel_D=C3=A4nzer?=a6b54f32006-10-24 23:37:43 +1000481/**
Jani Nikulaf49e38d2013-04-29 13:02:54 +0300482 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
Zhao Yakui01c66882009-10-28 05:10:00 +0000483 */
Jani Nikulaf49e38d2013-04-29 13:02:54 +0300484static void i915_enable_asle_pipestat(struct drm_device *dev)
Zhao Yakui01c66882009-10-28 05:10:00 +0000485{
Jani Nikula2d1013d2014-03-31 14:27:17 +0300486 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +0000487
Jani Nikulaf49e38d2013-04-29 13:02:54 +0300488 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
489 return;
490
Daniel Vetter13321782014-09-15 14:55:29 +0200491 spin_lock_irq(&dev_priv->irq_lock);
Zhao Yakui01c66882009-10-28 05:10:00 +0000492
Imre Deak755e9012014-02-10 18:42:47 +0200493 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
Jani Nikulaf8987802013-04-29 13:02:53 +0300494 if (INTEL_INFO(dev)->gen >= 4)
Daniel Vetter3b6c42e2013-10-21 18:04:35 +0200495 i915_enable_pipestat(dev_priv, PIPE_A,
Imre Deak755e9012014-02-10 18:42:47 +0200496 PIPE_LEGACY_BLC_EVENT_STATUS);
Chris Wilson1ec14ad2010-12-04 11:30:53 +0000497
Daniel Vetter13321782014-09-15 14:55:29 +0200498 spin_unlock_irq(&dev_priv->irq_lock);
Zhao Yakui01c66882009-10-28 05:10:00 +0000499}
500
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                                                            |  increment frame counter (gen3/4)
 *       pixel counter = vblank_start * htotal                pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

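/*
 * Worked example (sketch): if the hardware frame counter reads N (high1:low)
 * and the pixel counter has already crossed the adjusted vbl_start, the
 * function above reports N + 1. The cooked-up value therefore appears to
 * increment at the start of vblank rather than at the first visible pixel,
 * approximating the behaviour of the dedicated frame counter on ctg+.
 */
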
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

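/*
 * Put differently (sketch of the comparison above): vlv_c0_above() evaluates
 *
 *	(delta render_c0 + delta media_c0) * 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000
 *		>= delta cz_clock * threshold * mem_freq
 *
 * i.e. it reports true once the combined render+media C0 residency over the
 * evaluation interval reaches the requested percentage threshold.
 */
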
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
1034static void ivybridge_parity_work(struct work_struct *work)
1035{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001036 struct drm_i915_private *dev_priv =
1037 container_of(work, struct drm_i915_private, l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001038 u32 error_status, row, bank, subbank;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001039 char *parity_event[6];
Ben Widawskye3689192012-05-25 16:56:22 -07001040 uint32_t misccpctl;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001041 uint8_t slice = 0;
Ben Widawskye3689192012-05-25 16:56:22 -07001042
1043 /* We must turn off DOP level clock gating to access the L3 registers.
1044 * In order to prevent a get/put style interface, acquire struct mutex
1045 * any time we access those registers.
1046 */
1047 mutex_lock(&dev_priv->dev->struct_mutex);
1048
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001049 /* If we've screwed up tracking, just let the interrupt fire again */
1050 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1051 goto out;
1052
Ben Widawskye3689192012-05-25 16:56:22 -07001053 misccpctl = I915_READ(GEN7_MISCCPCTL);
1054 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1055 POSTING_READ(GEN7_MISCCPCTL);
1056
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001057 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1058 u32 reg;
Ben Widawskye3689192012-05-25 16:56:22 -07001059
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001060 slice--;
1061 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1062 break;
1063
1064 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1065
1066 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1067
1068 error_status = I915_READ(reg);
1069 row = GEN7_PARITY_ERROR_ROW(error_status);
1070 bank = GEN7_PARITY_ERROR_BANK(error_status);
1071 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1072
1073 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1074 POSTING_READ(reg);
1075
1076 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1077 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1078 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1079 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1080 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1081 parity_event[5] = NULL;
1082
Dave Airlie5bdebb12013-10-11 14:07:25 +10001083 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001084 KOBJ_CHANGE, parity_event);
1085
1086 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1087 slice, row, bank, subbank);
1088
1089 kfree(parity_event[4]);
1090 kfree(parity_event[3]);
1091 kfree(parity_event[2]);
1092 kfree(parity_event[1]);
1093 }
Ben Widawskye3689192012-05-25 16:56:22 -07001094
1095 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1096
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001097out:
1098 WARN_ON(dev_priv->l3_parity.which_slice);
Daniel Vetter4cb21832014-09-15 14:55:26 +02001099 spin_lock_irq(&dev_priv->irq_lock);
Daniel Vetter480c8032014-07-16 09:49:40 +02001100 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
Daniel Vetter4cb21832014-09-15 14:55:26 +02001101 spin_unlock_irq(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001102
1103 mutex_unlock(&dev_priv->dev->struct_mutex);
Ben Widawskye3689192012-05-25 16:56:22 -07001104}
1105
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001106static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
Ben Widawskye3689192012-05-25 16:56:22 -07001107{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001108 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskye3689192012-05-25 16:56:22 -07001109
Ben Widawsky040d2ba2013-09-19 11:01:40 -07001110 if (!HAS_L3_DPF(dev))
Ben Widawskye3689192012-05-25 16:56:22 -07001111 return;
1112
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001113 spin_lock(&dev_priv->irq_lock);
Daniel Vetter480c8032014-07-16 09:49:40 +02001114 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001115 spin_unlock(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001116
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001117 iir &= GT_PARITY_ERROR(dev);
1118 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1119 dev_priv->l3_parity.which_slice |= 1 << 1;
1120
1121 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1122 dev_priv->l3_parity.which_slice |= 1 << 0;
1123
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001124 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001125}
1126
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001127static void ilk_gt_irq_handler(struct drm_device *dev,
1128 struct drm_i915_private *dev_priv,
1129 u32 gt_iir)
1130{
1131 if (gt_iir &
1132 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
Chris Wilson74cdb332015-04-07 16:21:05 +01001133 notify_ring(&dev_priv->ring[RCS]);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001134 if (gt_iir & ILK_BSD_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01001135 notify_ring(&dev_priv->ring[VCS]);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001136}
1137
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001138static void snb_gt_irq_handler(struct drm_device *dev,
1139 struct drm_i915_private *dev_priv,
1140 u32 gt_iir)
1141{
1142
Ben Widawskycc609d52013-05-28 19:22:29 -07001143 if (gt_iir &
1144 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
Chris Wilson74cdb332015-04-07 16:21:05 +01001145 notify_ring(&dev_priv->ring[RCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001146 if (gt_iir & GT_BSD_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01001147 notify_ring(&dev_priv->ring[VCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001148 if (gt_iir & GT_BLT_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01001149 notify_ring(&dev_priv->ring[BCS]);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001150
Ben Widawskycc609d52013-05-28 19:22:29 -07001151 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1152 GT_BSD_CS_ERROR_INTERRUPT |
Daniel Vetteraaecdf62014-11-04 15:52:22 +01001153 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1154 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
Ben Widawskye3689192012-05-25 16:56:22 -07001155
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001156 if (gt_iir & GT_PARITY_ERROR(dev))
1157 ivybridge_parity_error_irq_handler(dev, gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001158}
1159
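/*
 * Gen8 GT interrupts are spread across four IIR banks: render/blitter,
 * the two video decode rings, video enhancement, and the PM/RPS events.
 * Each bank is acked before its user/context-switch bits are handled.
 */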
Chris Wilson74cdb332015-04-07 16:21:05 +01001160static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
Ben Widawskyabd58f02013-11-02 21:07:09 -07001161 u32 master_ctl)
1162{
Ben Widawskyabd58f02013-11-02 21:07:09 -07001163 irqreturn_t ret = IRQ_NONE;
1164
1165 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
Chris Wilson74cdb332015-04-07 16:21:05 +01001166 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
Ben Widawskyabd58f02013-11-02 21:07:09 -07001167 if (tmp) {
Chris Wilsoncb0d2052015-04-07 16:21:04 +01001168 I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001169 ret = IRQ_HANDLED;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001170
Chris Wilson74cdb332015-04-07 16:21:05 +01001171 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1172 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1173 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1174 notify_ring(&dev_priv->ring[RCS]);
Thomas Daniele981e7b2014-07-24 17:04:39 +01001175
Chris Wilson74cdb332015-04-07 16:21:05 +01001176 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1177 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1178 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1179 notify_ring(&dev_priv->ring[BCS]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001180 } else
1181 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1182 }
1183
Zhao Yakui85f9b5f2014-04-17 10:37:38 +08001184 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
Chris Wilson74cdb332015-04-07 16:21:05 +01001185 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
Ben Widawskyabd58f02013-11-02 21:07:09 -07001186 if (tmp) {
Chris Wilsoncb0d2052015-04-07 16:21:04 +01001187 I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001188 ret = IRQ_HANDLED;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001189
Chris Wilson74cdb332015-04-07 16:21:05 +01001190 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1191 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1192 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1193 notify_ring(&dev_priv->ring[VCS]);
Thomas Daniele981e7b2014-07-24 17:04:39 +01001194
Chris Wilson74cdb332015-04-07 16:21:05 +01001195 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1196 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1197 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1198 notify_ring(&dev_priv->ring[VCS2]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001199 } else
1200 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1201 }
1202
Chris Wilson74cdb332015-04-07 16:21:05 +01001203 if (master_ctl & GEN8_GT_VECS_IRQ) {
1204 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1205 if (tmp) {
1206 I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1207 ret = IRQ_HANDLED;
1208
1209 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1210 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1211 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1212 notify_ring(&dev_priv->ring[VECS]);
1213 } else
1214 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1215 }
1216
Ben Widawsky09610212014-05-15 20:58:08 +03001217 if (master_ctl & GEN8_GT_PM_IRQ) {
Chris Wilson74cdb332015-04-07 16:21:05 +01001218 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
Ben Widawsky09610212014-05-15 20:58:08 +03001219 if (tmp & dev_priv->pm_rps_events) {
Chris Wilsoncb0d2052015-04-07 16:21:04 +01001220 I915_WRITE_FW(GEN8_GT_IIR(2),
1221 tmp & dev_priv->pm_rps_events);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01001222 ret = IRQ_HANDLED;
Imre Deakc9a9a262014-11-05 20:48:37 +02001223 gen6_rps_irq_handler(dev_priv, tmp);
Ben Widawsky09610212014-05-15 20:58:08 +03001224 } else
1225 DRM_ERROR("The master control interrupt lied (PM)!\n");
1226 }
1227
Ben Widawskyabd58f02013-11-02 21:07:09 -07001228 return ret;
1229}
1230
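/*
 * Long-pulse decoders passed to intel_get_hpd_pins(): given a port and the
 * raw hotplug register value, report whether the pulse was a long one
 * (a real plug/unplug) rather than a short pulse. One variant per register
 * layout: BXT, PCH (IBX/CPT) and i9xx/g4x.
 */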
Imre Deak63c88d22015-07-20 14:43:39 -07001231static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1232{
1233 switch (port) {
1234 case PORT_A:
1235 return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
1236 case PORT_B:
1237 return val & PORTB_HOTPLUG_LONG_DETECT;
1238 case PORT_C:
1239 return val & PORTC_HOTPLUG_LONG_DETECT;
1240 case PORT_D:
1241 return val & PORTD_HOTPLUG_LONG_DETECT;
1242 default:
1243 return false;
1244 }
1245}
1246
Jani Nikula676574d2015-05-28 15:43:53 +03001247static bool pch_port_hotplug_long_detect(enum port port, u32 val)
Dave Airlie13cf5502014-06-18 11:29:35 +10001248{
1249 switch (port) {
Dave Airlie13cf5502014-06-18 11:29:35 +10001250 case PORT_B:
Jani Nikula676574d2015-05-28 15:43:53 +03001251 return val & PORTB_HOTPLUG_LONG_DETECT;
Dave Airlie13cf5502014-06-18 11:29:35 +10001252 case PORT_C:
Jani Nikula676574d2015-05-28 15:43:53 +03001253 return val & PORTC_HOTPLUG_LONG_DETECT;
Dave Airlie13cf5502014-06-18 11:29:35 +10001254 case PORT_D:
Jani Nikula676574d2015-05-28 15:43:53 +03001255 return val & PORTD_HOTPLUG_LONG_DETECT;
1256 default:
1257 return false;
Dave Airlie13cf5502014-06-18 11:29:35 +10001258 }
1259}
1260
Jani Nikula676574d2015-05-28 15:43:53 +03001261static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
Dave Airlie13cf5502014-06-18 11:29:35 +10001262{
1263 switch (port) {
Dave Airlie13cf5502014-06-18 11:29:35 +10001264 case PORT_B:
Jani Nikula676574d2015-05-28 15:43:53 +03001265 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
Dave Airlie13cf5502014-06-18 11:29:35 +10001266 case PORT_C:
Jani Nikula676574d2015-05-28 15:43:53 +03001267 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
Dave Airlie13cf5502014-06-18 11:29:35 +10001268 case PORT_D:
Jani Nikula676574d2015-05-28 15:43:53 +03001269 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1270 default:
1271 return false;
Dave Airlie13cf5502014-06-18 11:29:35 +10001272 }
1273}
1274
Jani Nikula676574d2015-05-28 15:43:53 +03001275/* Get a bit mask of pins that have triggered, and which ones may be long. */
Imre Deakfd63e2a2015-07-21 15:32:44 -07001276static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
Jani Nikula8c841e52015-06-18 13:06:17 +03001277 u32 hotplug_trigger, u32 dig_hotplug_reg,
Imre Deakfd63e2a2015-07-21 15:32:44 -07001278 const u32 hpd[HPD_NUM_PINS],
1279 bool long_pulse_detect(enum port port, u32 val))
Jani Nikula676574d2015-05-28 15:43:53 +03001280{
Jani Nikula8c841e52015-06-18 13:06:17 +03001281 enum port port;
Jani Nikula676574d2015-05-28 15:43:53 +03001282 int i;
1283
1284 *pin_mask = 0;
1285 *long_mask = 0;
1286
Jani Nikula676574d2015-05-28 15:43:53 +03001287 for_each_hpd_pin(i) {
Jani Nikula8c841e52015-06-18 13:06:17 +03001288 if ((hpd[i] & hotplug_trigger) == 0)
1289 continue;
Jani Nikula676574d2015-05-28 15:43:53 +03001290
Jani Nikula8c841e52015-06-18 13:06:17 +03001291 *pin_mask |= BIT(i);
1292
Imre Deakcc24fcd2015-07-21 15:32:45 -07001293 if (!intel_hpd_pin_to_port(i, &port))
1294 continue;
1295
Imre Deakfd63e2a2015-07-21 15:32:44 -07001296 if (long_pulse_detect(port, dig_hotplug_reg))
Jani Nikula8c841e52015-06-18 13:06:17 +03001297 *long_mask |= BIT(i);
Jani Nikula676574d2015-05-28 15:43:53 +03001298 }
1299
1300 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1301 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1302
1303}
1304
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001305static void gmbus_irq_handler(struct drm_device *dev)
1306{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001307 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter28c70f12012-12-01 13:53:45 +01001308
Daniel Vetter28c70f12012-12-01 13:53:45 +01001309 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001310}
1311
Daniel Vetterce99c252012-12-01 13:53:47 +01001312static void dp_aux_irq_handler(struct drm_device *dev)
1313{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001314 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001315
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001316 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetterce99c252012-12-01 13:53:47 +01001317}
1318
Shuang He8bf1e9f2013-10-15 18:55:27 +01001319#if defined(CONFIG_DEBUG_FS)
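/*
 * Stash one CRC sample in the per-pipe circular buffer and wake any debugfs
 * reader blocked on pipe_crc->wq; samples are dropped with an error when
 * the buffer is full, or ignored if it hasn't been allocated yet.
 */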
Daniel Vetter277de952013-10-18 16:37:07 +02001320static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1321 uint32_t crc0, uint32_t crc1,
1322 uint32_t crc2, uint32_t crc3,
1323 uint32_t crc4)
Shuang He8bf1e9f2013-10-15 18:55:27 +01001324{
1325 struct drm_i915_private *dev_priv = dev->dev_private;
1326 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1327 struct intel_pipe_crc_entry *entry;
Damien Lespiauac2300d2013-10-15 18:55:30 +01001328 int head, tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001329
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001330 spin_lock(&pipe_crc->lock);
1331
Damien Lespiau0c912c72013-10-15 18:55:37 +01001332 if (!pipe_crc->entries) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001333 spin_unlock(&pipe_crc->lock);
Daniel Vetter34273622014-11-26 16:29:04 +01001334 DRM_DEBUG_KMS("spurious interrupt\n");
Damien Lespiau0c912c72013-10-15 18:55:37 +01001335 return;
1336 }
1337
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001338 head = pipe_crc->head;
1339 tail = pipe_crc->tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001340
1341 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001342 spin_unlock(&pipe_crc->lock);
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001343 DRM_ERROR("CRC buffer overflowing\n");
1344 return;
1345 }
1346
1347 entry = &pipe_crc->entries[head];
Shuang He8bf1e9f2013-10-15 18:55:27 +01001348
Daniel Vetter8bc5e952013-10-16 22:55:49 +02001349 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
Daniel Vettereba94eb2013-10-16 22:55:46 +02001350 entry->crc[0] = crc0;
1351 entry->crc[1] = crc1;
1352 entry->crc[2] = crc2;
1353 entry->crc[3] = crc3;
1354 entry->crc[4] = crc4;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001355
1356 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001357 pipe_crc->head = head;
1358
1359 spin_unlock(&pipe_crc->lock);
Damien Lespiau07144422013-10-15 18:55:40 +01001360
1361 wake_up_interruptible(&pipe_crc->wq);
Shuang He8bf1e9f2013-10-15 18:55:27 +01001362}
Daniel Vetter277de952013-10-18 16:37:07 +02001363#else
1364static inline void
1365display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1366 uint32_t crc0, uint32_t crc1,
1367 uint32_t crc2, uint32_t crc3,
1368 uint32_t crc4) {}
1369#endif
Daniel Vettereba94eb2013-10-16 22:55:46 +02001370
Daniel Vetter277de952013-10-18 16:37:07 +02001371
1372static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5a69b892013-10-16 22:55:52 +02001373{
1374 struct drm_i915_private *dev_priv = dev->dev_private;
1375
Daniel Vetter277de952013-10-18 16:37:07 +02001376 display_pipe_crc_irq_handler(dev, pipe,
1377 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1378 0, 0, 0, 0);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001379}
1380
Daniel Vetter277de952013-10-18 16:37:07 +02001381static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vettereba94eb2013-10-16 22:55:46 +02001382{
1383 struct drm_i915_private *dev_priv = dev->dev_private;
1384
Daniel Vetter277de952013-10-18 16:37:07 +02001385 display_pipe_crc_irq_handler(dev, pipe,
1386 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1387 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1388 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1389 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1390 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
Daniel Vettereba94eb2013-10-16 22:55:46 +02001391}
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001392
Daniel Vetter277de952013-10-18 16:37:07 +02001393static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001394{
1395 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001396 uint32_t res1, res2;
1397
1398 if (INTEL_INFO(dev)->gen >= 3)
1399 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1400 else
1401 res1 = 0;
1402
1403 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1404 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1405 else
1406 res2 = 0;
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001407
Daniel Vetter277de952013-10-18 16:37:07 +02001408 display_pipe_crc_irq_handler(dev, pipe,
1409 I915_READ(PIPE_CRC_RES_RED(pipe)),
1410 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1411 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1412 res1, res2);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001413}
Shuang He8bf1e9f2013-10-15 18:55:27 +01001414
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001415/* The RPS events need forcewake, so we add them to a work queue and mask their
1416 * IMR bits until the work is done. Other interrupts can be processed without
1417 * the work queue. */
1418static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
Ben Widawskybaf02a12013-05-28 19:22:24 -07001419{
Deepak Sa6706b42014-03-15 20:23:22 +05301420 if (pm_iir & dev_priv->pm_rps_events) {
Daniel Vetter59cdb632013-07-04 23:35:28 +02001421 spin_lock(&dev_priv->irq_lock);
Daniel Vetter480c8032014-07-16 09:49:40 +02001422 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
Imre Deakd4d70aa2014-11-19 15:30:04 +02001423 if (dev_priv->rps.interrupts_enabled) {
1424 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1425 queue_work(dev_priv->wq, &dev_priv->rps.work);
1426 }
Daniel Vetter59cdb632013-07-04 23:35:28 +02001427 spin_unlock(&dev_priv->irq_lock);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001428 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001429
Imre Deakc9a9a262014-11-05 20:48:37 +02001430 if (INTEL_INFO(dev_priv)->gen >= 8)
1431 return;
1432
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001433 if (HAS_VEBOX(dev_priv->dev)) {
1434 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01001435 notify_ring(&dev_priv->ring[VECS]);
Ben Widawsky12638c52013-05-28 19:22:31 -07001436
Daniel Vetteraaecdf62014-11-04 15:52:22 +01001437 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1438 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
Ben Widawsky12638c52013-05-28 19:22:31 -07001439 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001440}
1441
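/* Returns true if the DRM core accepted the vblank event for this pipe. */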
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03001442static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1443{
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03001444 if (!drm_handle_vblank(dev, pipe))
1445 return false;
1446
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03001447 return true;
1448}
1449
Imre Deakc1874ed2014-02-04 21:35:46 +02001450static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1451{
1452 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak91d181d2014-02-10 18:42:49 +02001453 u32 pipe_stats[I915_MAX_PIPES] = { };
Imre Deakc1874ed2014-02-04 21:35:46 +02001454 int pipe;
1455
Imre Deak58ead0d2014-02-04 21:35:47 +02001456 spin_lock(&dev_priv->irq_lock);
Damien Lespiau055e3932014-08-18 13:49:10 +01001457 for_each_pipe(dev_priv, pipe) {
Imre Deak91d181d2014-02-10 18:42:49 +02001458 int reg;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001459 u32 mask, iir_bit = 0;
Imre Deak91d181d2014-02-10 18:42:49 +02001460
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001461 /*
1462 * PIPESTAT bits get signalled even when the interrupt is
1463 * disabled with the mask bits, and some of the status bits do
1464 * not generate interrupts at all (like the underrun bit). Hence
1465 * we need to be careful that we only handle what we want to
1466 * handle.
1467 */
Daniel Vetter0f239f42014-09-30 10:56:49 +02001468
 1469 /* fifo underruns are filtered in the underrun handler. */
1470 mask = PIPE_FIFO_UNDERRUN_STATUS;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001471
1472 switch (pipe) {
1473 case PIPE_A:
1474 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1475 break;
1476 case PIPE_B:
1477 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1478 break;
Ville Syrjälä3278f672014-04-09 13:28:49 +03001479 case PIPE_C:
1480 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1481 break;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001482 }
1483 if (iir & iir_bit)
1484 mask |= dev_priv->pipestat_irq_mask[pipe];
1485
1486 if (!mask)
Imre Deak91d181d2014-02-10 18:42:49 +02001487 continue;
1488
1489 reg = PIPESTAT(pipe);
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001490 mask |= PIPESTAT_INT_ENABLE_MASK;
1491 pipe_stats[pipe] = I915_READ(reg) & mask;
Imre Deakc1874ed2014-02-04 21:35:46 +02001492
1493 /*
1494 * Clear the PIPE*STAT regs before the IIR
1495 */
Imre Deak91d181d2014-02-10 18:42:49 +02001496 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1497 PIPESTAT_INT_STATUS_MASK))
Imre Deakc1874ed2014-02-04 21:35:46 +02001498 I915_WRITE(reg, pipe_stats[pipe]);
1499 }
Imre Deak58ead0d2014-02-04 21:35:47 +02001500 spin_unlock(&dev_priv->irq_lock);
Imre Deakc1874ed2014-02-04 21:35:46 +02001501
Damien Lespiau055e3932014-08-18 13:49:10 +01001502 for_each_pipe(dev_priv, pipe) {
Chris Wilsond6bbafa2014-09-05 07:13:24 +01001503 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1504 intel_pipe_handle_vblank(dev, pipe))
1505 intel_check_page_flip(dev, pipe);
Imre Deakc1874ed2014-02-04 21:35:46 +02001506
Imre Deak579a9b02014-02-04 21:35:48 +02001507 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
Imre Deakc1874ed2014-02-04 21:35:46 +02001508 intel_prepare_page_flip(dev, pipe);
1509 intel_finish_page_flip(dev, pipe);
1510 }
1511
1512 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1513 i9xx_pipe_crc_irq_handler(dev, pipe);
1514
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001515 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1516 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Imre Deakc1874ed2014-02-04 21:35:46 +02001517 }
1518
1519 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1520 gmbus_irq_handler(dev);
1521}
1522
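/*
 * Hotplug handling for the i9xx/g4x-style PORT_HOTPLUG_STAT register (also
 * used on VLV/CHV): read and clear the status, decode the per-port trigger
 * bits into HPD pins, and kick DP AUX handling on g4x/vlv.
 */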
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001523static void i9xx_hpd_irq_handler(struct drm_device *dev)
1524{
1525 struct drm_i915_private *dev_priv = dev->dev_private;
1526 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Jani Nikula676574d2015-05-28 15:43:53 +03001527 u32 pin_mask, long_mask;
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001528
Jani Nikula0d2e4292015-05-27 15:03:39 +03001529 if (!hotplug_status)
1530 return;
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001531
Jani Nikula0d2e4292015-05-27 15:03:39 +03001532 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1533 /*
1534 * Make sure hotplug status is cleared before we clear IIR, or else we
1535 * may miss hotplug events.
1536 */
1537 POSTING_READ(PORT_HOTPLUG_STAT);
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001538
Jani Nikula0d2e4292015-05-27 15:03:39 +03001539 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1540 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001541
Imre Deakfd63e2a2015-07-21 15:32:44 -07001542 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1543 hotplug_trigger, hpd_status_g4x,
1544 i9xx_port_hotplug_long_detect);
Jani Nikula676574d2015-05-28 15:43:53 +03001545 intel_hpd_irq_handler(dev, pin_mask, long_mask);
Jani Nikula369712e2015-05-27 15:03:40 +03001546
1547 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1548 dp_aux_irq_handler(dev);
Jani Nikula0d2e4292015-05-27 15:03:39 +03001549 } else {
1550 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001551
Imre Deakfd63e2a2015-07-21 15:32:44 -07001552 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 1553 hotplug_trigger, hpd_status_i915,
1554 i9xx_port_hotplug_long_detect);
Jani Nikula676574d2015-05-28 15:43:53 +03001555 intel_hpd_irq_handler(dev, pin_mask, long_mask);
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001556 }
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001557}
1558
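/*
 * VLV top-level handler: loop reading the GT, PM and display IIRs, clearing
 * each before processing its sources, until all three read back zero.
 */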
Daniel Vetterff1f5252012-10-02 15:10:55 +02001559static irqreturn_t valleyview_irq_handler(int irq, void *arg)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001560{
Daniel Vetter45a83f82014-05-12 19:17:55 +02001561 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03001562 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001563 u32 iir, gt_iir, pm_iir;
1564 irqreturn_t ret = IRQ_NONE;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001565
Imre Deak2dd2a882015-02-24 11:14:30 +02001566 if (!intel_irqs_enabled(dev_priv))
1567 return IRQ_NONE;
1568
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001569 while (true) {
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001570 /* Find, clear, then process each source of interrupt */
1571
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001572 gt_iir = I915_READ(GTIIR);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001573 if (gt_iir)
1574 I915_WRITE(GTIIR, gt_iir);
1575
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001576 pm_iir = I915_READ(GEN6_PMIIR);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001577 if (pm_iir)
1578 I915_WRITE(GEN6_PMIIR, pm_iir);
1579
1580 iir = I915_READ(VLV_IIR);
1581 if (iir) {
1582 /* Consume port before clearing IIR or we'll miss events */
1583 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1584 i9xx_hpd_irq_handler(dev);
1585 I915_WRITE(VLV_IIR, iir);
1586 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001587
1588 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1589 goto out;
1590
1591 ret = IRQ_HANDLED;
1592
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001593 if (gt_iir)
1594 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanoni60611c12013-08-15 11:50:01 -03001595 if (pm_iir)
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001596 gen6_rps_irq_handler(dev_priv, pm_iir);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001597 /* Call regardless, as some status bits might not be
1598 * signalled in iir */
1599 valleyview_pipestat_irq_handler(dev, iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001600 }
1601
1602out:
1603 return ret;
1604}
1605
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001606static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1607{
Daniel Vetter45a83f82014-05-12 19:17:55 +02001608 struct drm_device *dev = arg;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001609 struct drm_i915_private *dev_priv = dev->dev_private;
1610 u32 master_ctl, iir;
1611 irqreturn_t ret = IRQ_NONE;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001612
Imre Deak2dd2a882015-02-24 11:14:30 +02001613 if (!intel_irqs_enabled(dev_priv))
1614 return IRQ_NONE;
1615
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001616 for (;;) {
1617 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1618 iir = I915_READ(VLV_IIR);
Ville Syrjälä3278f672014-04-09 13:28:49 +03001619
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001620 if (master_ctl == 0 && iir == 0)
1621 break;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001622
Oscar Mateo27b6c122014-06-16 16:11:00 +01001623 ret = IRQ_HANDLED;
1624
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001625 I915_WRITE(GEN8_MASTER_IRQ, 0);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001626
Oscar Mateo27b6c122014-06-16 16:11:00 +01001627 /* Find, clear, then process each source of interrupt */
1628
1629 if (iir) {
1630 /* Consume port before clearing IIR or we'll miss events */
1631 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1632 i9xx_hpd_irq_handler(dev);
1633 I915_WRITE(VLV_IIR, iir);
1634 }
1635
Chris Wilson74cdb332015-04-07 16:21:05 +01001636 gen8_gt_irq_handler(dev_priv, master_ctl);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001637
Oscar Mateo27b6c122014-06-16 16:11:00 +01001638 /* Call regardless, as some status bits might not be
1639 * signalled in iir */
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001640 valleyview_pipestat_irq_handler(dev, iir);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001641
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001642 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1643 POSTING_READ(GEN8_MASTER_IRQ);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001644 }
1645
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001646 return ret;
1647}
1648
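/*
 * South display engine (PCH) interrupt handling for IBX: hotplug, DP AUX,
 * GMBUS, audio power, FDI RX and transcoder FIFO underrun/poison events.
 * The CPT/PPT variant below follows the same structure with its own bit layout.
 */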
Adam Jackson23e81d62012-06-06 15:45:44 -04001649static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08001650{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001651 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001652 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001653 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Jesse Barnes776ad802011-01-04 15:09:39 -08001654
Sonika Jindalaaf5ec22015-07-08 17:07:47 +05301655 if (hotplug_trigger) {
1656 u32 dig_hotplug_reg, pin_mask, long_mask;
Dave Airlie13cf5502014-06-18 11:29:35 +10001657
Sonika Jindalaaf5ec22015-07-08 17:07:47 +05301658 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1659 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1660
Imre Deakfd63e2a2015-07-21 15:32:44 -07001661 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1662 dig_hotplug_reg, hpd_ibx,
1663 pch_port_hotplug_long_detect);
Sonika Jindalaaf5ec22015-07-08 17:07:47 +05301664 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1665 }
Daniel Vetter91d131d2013-06-27 17:52:14 +02001666
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001667 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1668 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1669 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08001670 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001671 port_name(port));
1672 }
Jesse Barnes776ad802011-01-04 15:09:39 -08001673
Daniel Vetterce99c252012-12-01 13:53:47 +01001674 if (pch_iir & SDE_AUX_MASK)
1675 dp_aux_irq_handler(dev);
1676
Jesse Barnes776ad802011-01-04 15:09:39 -08001677 if (pch_iir & SDE_GMBUS)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001678 gmbus_irq_handler(dev);
Jesse Barnes776ad802011-01-04 15:09:39 -08001679
1680 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1681 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1682
1683 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1684 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1685
1686 if (pch_iir & SDE_POISON)
1687 DRM_ERROR("PCH poison interrupt\n");
1688
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001689 if (pch_iir & SDE_FDI_MASK)
Damien Lespiau055e3932014-08-18 13:49:10 +01001690 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001691 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1692 pipe_name(pipe),
1693 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08001694
1695 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1696 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1697
1698 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1699 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1700
Jesse Barnes776ad802011-01-04 15:09:39 -08001701 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001702 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
Paulo Zanoni86642812013-04-12 17:57:57 -03001703
1704 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001705 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
Paulo Zanoni86642812013-04-12 17:57:57 -03001706}
1707
1708static void ivb_err_int_handler(struct drm_device *dev)
1709{
1710 struct drm_i915_private *dev_priv = dev->dev_private;
1711 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001712 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03001713
Paulo Zanonide032bf2013-04-12 17:57:58 -03001714 if (err_int & ERR_INT_POISON)
1715 DRM_ERROR("Poison interrupt\n");
1716
Damien Lespiau055e3932014-08-18 13:49:10 +01001717 for_each_pipe(dev_priv, pipe) {
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001718 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1719 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanoni86642812013-04-12 17:57:57 -03001720
Daniel Vetter5a69b892013-10-16 22:55:52 +02001721 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1722 if (IS_IVYBRIDGE(dev))
Daniel Vetter277de952013-10-18 16:37:07 +02001723 ivb_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001724 else
Daniel Vetter277de952013-10-18 16:37:07 +02001725 hsw_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001726 }
1727 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01001728
Paulo Zanoni86642812013-04-12 17:57:57 -03001729 I915_WRITE(GEN7_ERR_INT, err_int);
1730}
1731
1732static void cpt_serr_int_handler(struct drm_device *dev)
1733{
1734 struct drm_i915_private *dev_priv = dev->dev_private;
1735 u32 serr_int = I915_READ(SERR_INT);
1736
Paulo Zanonide032bf2013-04-12 17:57:58 -03001737 if (serr_int & SERR_INT_POISON)
1738 DRM_ERROR("PCH poison interrupt\n");
1739
Paulo Zanoni86642812013-04-12 17:57:57 -03001740 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001741 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
Paulo Zanoni86642812013-04-12 17:57:57 -03001742
1743 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001744 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
Paulo Zanoni86642812013-04-12 17:57:57 -03001745
1746 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001747 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
Paulo Zanoni86642812013-04-12 17:57:57 -03001748
1749 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08001750}
1751
Adam Jackson23e81d62012-06-06 15:45:44 -04001752static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1753{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001754 struct drm_i915_private *dev_priv = dev->dev_private;
Adam Jackson23e81d62012-06-06 15:45:44 -04001755 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001756 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04001757
Sonika Jindalaaf5ec22015-07-08 17:07:47 +05301758 if (hotplug_trigger) {
1759 u32 dig_hotplug_reg, pin_mask, long_mask;
Dave Airlie13cf5502014-06-18 11:29:35 +10001760
Sonika Jindalaaf5ec22015-07-08 17:07:47 +05301761 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1762 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
Imre Deakfd63e2a2015-07-21 15:32:44 -07001763
1764 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1765 dig_hotplug_reg, hpd_cpt,
1766 pch_port_hotplug_long_detect);
Sonika Jindalaaf5ec22015-07-08 17:07:47 +05301767 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1768 }
Daniel Vetter91d131d2013-06-27 17:52:14 +02001769
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001770 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1771 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1772 SDE_AUDIO_POWER_SHIFT_CPT);
1773 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1774 port_name(port));
1775 }
Adam Jackson23e81d62012-06-06 15:45:44 -04001776
1777 if (pch_iir & SDE_AUX_MASK_CPT)
Daniel Vetterce99c252012-12-01 13:53:47 +01001778 dp_aux_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001779
1780 if (pch_iir & SDE_GMBUS_CPT)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001781 gmbus_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001782
1783 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1784 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1785
1786 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1787 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1788
1789 if (pch_iir & SDE_FDI_MASK_CPT)
Damien Lespiau055e3932014-08-18 13:49:10 +01001790 for_each_pipe(dev_priv, pipe)
Adam Jackson23e81d62012-06-06 15:45:44 -04001791 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1792 pipe_name(pipe),
1793 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03001794
1795 if (pch_iir & SDE_ERROR_CPT)
1796 cpt_serr_int_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001797}
1798
Paulo Zanonic008bc62013-07-12 16:35:10 -03001799static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1800{
1801 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter40da17c22013-10-21 18:04:36 +02001802 enum pipe pipe;
Paulo Zanonic008bc62013-07-12 16:35:10 -03001803
1804 if (de_iir & DE_AUX_CHANNEL_A)
1805 dp_aux_irq_handler(dev);
1806
1807 if (de_iir & DE_GSE)
1808 intel_opregion_asle_intr(dev);
1809
Paulo Zanonic008bc62013-07-12 16:35:10 -03001810 if (de_iir & DE_POISON)
1811 DRM_ERROR("Poison interrupt\n");
1812
Damien Lespiau055e3932014-08-18 13:49:10 +01001813 for_each_pipe(dev_priv, pipe) {
Chris Wilsond6bbafa2014-09-05 07:13:24 +01001814 if (de_iir & DE_PIPE_VBLANK(pipe) &&
1815 intel_pipe_handle_vblank(dev, pipe))
1816 intel_check_page_flip(dev, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03001817
Daniel Vetter40da17c22013-10-21 18:04:36 +02001818 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001819 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03001820
Daniel Vetter40da17c22013-10-21 18:04:36 +02001821 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1822 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001823
Daniel Vetter40da17c22013-10-21 18:04:36 +02001824 /* plane/pipes map 1:1 on ilk+ */
1825 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1826 intel_prepare_page_flip(dev, pipe);
1827 intel_finish_page_flip_plane(dev, pipe);
1828 }
Paulo Zanonic008bc62013-07-12 16:35:10 -03001829 }
1830
1831 /* check event from PCH */
1832 if (de_iir & DE_PCH_EVENT) {
1833 u32 pch_iir = I915_READ(SDEIIR);
1834
1835 if (HAS_PCH_CPT(dev))
1836 cpt_irq_handler(dev, pch_iir);
1837 else
1838 ibx_irq_handler(dev, pch_iir);
1839
1840 /* should clear PCH hotplug event before clear CPU irq */
1841 I915_WRITE(SDEIIR, pch_iir);
1842 }
1843
1844 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1845 ironlake_rps_change_irq_handler(dev);
1846}
1847
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001848static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1849{
1850 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau07d27e22014-03-03 17:31:46 +00001851 enum pipe pipe;
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001852
1853 if (de_iir & DE_ERR_INT_IVB)
1854 ivb_err_int_handler(dev);
1855
1856 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1857 dp_aux_irq_handler(dev);
1858
1859 if (de_iir & DE_GSE_IVB)
1860 intel_opregion_asle_intr(dev);
1861
Damien Lespiau055e3932014-08-18 13:49:10 +01001862 for_each_pipe(dev_priv, pipe) {
Chris Wilsond6bbafa2014-09-05 07:13:24 +01001863 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
1864 intel_pipe_handle_vblank(dev, pipe))
1865 intel_check_page_flip(dev, pipe);
Daniel Vetter40da17c22013-10-21 18:04:36 +02001866
1867 /* plane/pipes map 1:1 on ilk+ */
Damien Lespiau07d27e22014-03-03 17:31:46 +00001868 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1869 intel_prepare_page_flip(dev, pipe);
1870 intel_finish_page_flip_plane(dev, pipe);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001871 }
1872 }
1873
1874 /* check event from PCH */
1875 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1876 u32 pch_iir = I915_READ(SDEIIR);
1877
1878 cpt_irq_handler(dev, pch_iir);
1879
1880 /* clear PCH hotplug event before clear CPU irq */
1881 I915_WRITE(SDEIIR, pch_iir);
1882 }
1883}
1884
Oscar Mateo72c90f62014-06-16 16:10:57 +01001885/*
1886 * To handle irqs with the minimum potential races with fresh interrupts, we:
1887 * 1 - Disable Master Interrupt Control.
1888 * 2 - Find the source(s) of the interrupt.
1889 * 3 - Clear the Interrupt Identity bits (IIR).
1890 * 4 - Process the interrupt(s) that had bits set in the IIRs.
1891 * 5 - Re-enable Master Interrupt Control.
1892 */
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001893static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001894{
Daniel Vetter45a83f82014-05-12 19:17:55 +02001895 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03001896 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001897 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01001898 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001899
Imre Deak2dd2a882015-02-24 11:14:30 +02001900 if (!intel_irqs_enabled(dev_priv))
1901 return IRQ_NONE;
1902
Paulo Zanoni86642812013-04-12 17:57:57 -03001903 /* We get interrupts on unclaimed registers, so check for this before we
1904 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01001905 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03001906
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001907 /* disable master interrupt before clearing iir */
1908 de_ier = I915_READ(DEIER);
1909 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03001910 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01001911
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001912 /* Disable south interrupts. We'll only write to SDEIIR once, so further
 1913 * interrupts will be stored on its back queue, and then we'll be
1914 * able to process them after we restore SDEIER (as soon as we restore
1915 * it, we'll get an interrupt if SDEIIR still has something to process
1916 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07001917 if (!HAS_PCH_NOP(dev)) {
1918 sde_ier = I915_READ(SDEIER);
1919 I915_WRITE(SDEIER, 0);
1920 POSTING_READ(SDEIER);
1921 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001922
Oscar Mateo72c90f62014-06-16 16:10:57 +01001923 /* Find, clear, then process each source of interrupt */
1924
Chris Wilson0e434062012-05-09 21:45:44 +01001925 gt_iir = I915_READ(GTIIR);
1926 if (gt_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01001927 I915_WRITE(GTIIR, gt_iir);
1928 ret = IRQ_HANDLED;
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001929 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001930 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001931 else
1932 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001933 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001934
1935 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01001936 if (de_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01001937 I915_WRITE(DEIIR, de_iir);
1938 ret = IRQ_HANDLED;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001939 if (INTEL_INFO(dev)->gen >= 7)
1940 ivb_display_irq_handler(dev, de_iir);
1941 else
1942 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001943 }
1944
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001945 if (INTEL_INFO(dev)->gen >= 6) {
1946 u32 pm_iir = I915_READ(GEN6_PMIIR);
1947 if (pm_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001948 I915_WRITE(GEN6_PMIIR, pm_iir);
1949 ret = IRQ_HANDLED;
Oscar Mateo72c90f62014-06-16 16:10:57 +01001950 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001951 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001952 }
1953
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001954 I915_WRITE(DEIER, de_ier);
1955 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07001956 if (!HAS_PCH_NOP(dev)) {
1957 I915_WRITE(SDEIER, sde_ier);
1958 POSTING_READ(SDEIER);
1959 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001960
1961 return ret;
1962}
1963
Shashank Sharmad04a4922014-08-22 17:40:41 +05301964static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
1965{
1966 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikula676574d2015-05-28 15:43:53 +03001967 u32 hp_control, hp_trigger;
1968 u32 pin_mask, long_mask;
Shashank Sharmad04a4922014-08-22 17:40:41 +05301969
1970 /* Get the status */
1971 hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
1972 hp_control = I915_READ(BXT_HOTPLUG_CTL);
1973
 1974 /* Hotplug not enabled? */
1975 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
1976 DRM_ERROR("Interrupt when HPD disabled\n");
1977 return;
1978 }
1979
Shashank Sharmad04a4922014-08-22 17:40:41 +05301980 /* Clear sticky bits in hpd status */
1981 I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
Jani Nikula475c2e32015-05-28 15:43:54 +03001982
Imre Deakfd63e2a2015-07-21 15:32:44 -07001983 intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
Imre Deak63c88d22015-07-20 14:43:39 -07001984 hpd_bxt, bxt_port_hotplug_long_detect);
Jani Nikula475c2e32015-05-28 15:43:54 +03001985 intel_hpd_irq_handler(dev, pin_mask, long_mask);
Shashank Sharmad04a4922014-08-22 17:40:41 +05301986}
1987
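/*
 * Gen8+ top-level handler: mask the master IRQ control, then walk the GT,
 * DE misc, DE port, per-pipe and PCH sources, acking each IIR before
 * processing it, and finally re-arm the master IRQ control.
 */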
Ben Widawskyabd58f02013-11-02 21:07:09 -07001988static irqreturn_t gen8_irq_handler(int irq, void *arg)
1989{
1990 struct drm_device *dev = arg;
1991 struct drm_i915_private *dev_priv = dev->dev_private;
1992 u32 master_ctl;
1993 irqreturn_t ret = IRQ_NONE;
1994 uint32_t tmp = 0;
Daniel Vetterc42664c2013-11-07 11:05:40 +01001995 enum pipe pipe;
Jesse Barnes88e04702014-11-13 17:51:48 +00001996 u32 aux_mask = GEN8_AUX_CHANNEL_A;
1997
Imre Deak2dd2a882015-02-24 11:14:30 +02001998 if (!intel_irqs_enabled(dev_priv))
1999 return IRQ_NONE;
2000
Jesse Barnes88e04702014-11-13 17:51:48 +00002001 if (IS_GEN9(dev))
2002 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2003 GEN9_AUX_CHANNEL_D;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002004
Chris Wilsoncb0d2052015-04-07 16:21:04 +01002005 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002006 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2007 if (!master_ctl)
2008 return IRQ_NONE;
2009
Chris Wilsoncb0d2052015-04-07 16:21:04 +01002010 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002011
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002012 /* Find, clear, then process each source of interrupt */
2013
Chris Wilson74cdb332015-04-07 16:21:05 +01002014 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002015
2016 if (master_ctl & GEN8_DE_MISC_IRQ) {
2017 tmp = I915_READ(GEN8_DE_MISC_IIR);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002018 if (tmp) {
2019 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2020 ret = IRQ_HANDLED;
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002021 if (tmp & GEN8_DE_MISC_GSE)
2022 intel_opregion_asle_intr(dev);
2023 else
2024 DRM_ERROR("Unexpected DE Misc interrupt\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002025 }
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002026 else
2027 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002028 }
2029
Daniel Vetter6d766f02013-11-07 14:49:55 +01002030 if (master_ctl & GEN8_DE_PORT_IRQ) {
2031 tmp = I915_READ(GEN8_DE_PORT_IIR);
Daniel Vetter6d766f02013-11-07 14:49:55 +01002032 if (tmp) {
Shashank Sharmad04a4922014-08-22 17:40:41 +05302033 bool found = false;
2034
Daniel Vetter6d766f02013-11-07 14:49:55 +01002035 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2036 ret = IRQ_HANDLED;
Jesse Barnes88e04702014-11-13 17:51:48 +00002037
Shashank Sharmad04a4922014-08-22 17:40:41 +05302038 if (tmp & aux_mask) {
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002039 dp_aux_irq_handler(dev);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302040 found = true;
2041 }
2042
2043 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2044 bxt_hpd_handler(dev, tmp);
2045 found = true;
2046 }
2047
Shashank Sharma9e637432014-08-22 17:40:43 +05302048 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2049 gmbus_irq_handler(dev);
2050 found = true;
2051 }
2052
Shashank Sharmad04a4922014-08-22 17:40:41 +05302053 if (!found)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002054 DRM_ERROR("Unexpected DE Port interrupt\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002055 }
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002056 else
2057 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002058 }
2059
Damien Lespiau055e3932014-08-18 13:49:10 +01002060 for_each_pipe(dev_priv, pipe) {
Damien Lespiau770de832014-03-20 20:45:01 +00002061 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002062
Daniel Vetterc42664c2013-11-07 11:05:40 +01002063 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2064 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002065
Daniel Vetterc42664c2013-11-07 11:05:40 +01002066 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
Daniel Vetterc42664c2013-11-07 11:05:40 +01002067 if (pipe_iir) {
2068 ret = IRQ_HANDLED;
2069 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
Damien Lespiau770de832014-03-20 20:45:01 +00002070
Chris Wilsond6bbafa2014-09-05 07:13:24 +01002071 if (pipe_iir & GEN8_PIPE_VBLANK &&
2072 intel_pipe_handle_vblank(dev, pipe))
2073 intel_check_page_flip(dev, pipe);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002074
Damien Lespiau770de832014-03-20 20:45:01 +00002075 if (IS_GEN9(dev))
2076 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2077 else
2078 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2079
2080 if (flip_done) {
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002081 intel_prepare_page_flip(dev, pipe);
2082 intel_finish_page_flip_plane(dev, pipe);
2083 }
2084
2085 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2086 hsw_pipe_crc_irq_handler(dev, pipe);
2087
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002088 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2089 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2090 pipe);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002091
Damien Lespiau770de832014-03-20 20:45:01 +00002092
2093 if (IS_GEN9(dev))
2094 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2095 else
2096 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2097
2098 if (fault_errors)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002099 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2100 pipe_name(pipe),
2101 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
Daniel Vetterc42664c2013-11-07 11:05:40 +01002102 } else
Ben Widawskyabd58f02013-11-02 21:07:09 -07002103 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2104 }
2105
Shashank Sharma266ea3d2014-08-22 17:40:42 +05302106 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2107 master_ctl & GEN8_DE_PCH_IRQ) {
Daniel Vetter92d03a82013-11-07 11:05:43 +01002108 /*
2109 * FIXME(BDW): Assume for now that the new interrupt handling
2110 * scheme also closed the SDE interrupt handling race we've seen
2111 * on older pch-split platforms. But this needs testing.
2112 */
2113 u32 pch_iir = I915_READ(SDEIIR);
Daniel Vetter92d03a82013-11-07 11:05:43 +01002114 if (pch_iir) {
2115 I915_WRITE(SDEIIR, pch_iir);
2116 ret = IRQ_HANDLED;
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002117 cpt_irq_handler(dev, pch_iir);
2118 } else
2119 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2120
Daniel Vetter92d03a82013-11-07 11:05:43 +01002121 }
2122
Chris Wilsoncb0d2052015-04-07 16:21:04 +01002123 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2124 POSTING_READ_FW(GEN8_MASTER_IRQ);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002125
2126 return ret;
2127}
2128
Daniel Vetter17e1df02013-09-08 21:57:13 +02002129static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2130 bool reset_completed)
2131{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002132 struct intel_engine_cs *ring;
Daniel Vetter17e1df02013-09-08 21:57:13 +02002133 int i;
2134
2135 /*
2136 * Notify all waiters for GPU completion events that reset state has
2137 * been changed, and that they need to restart their wait after
2138 * checking for potential errors (and bail out to drop locks if there is
2139 * a gpu reset pending so that i915_error_work_func can acquire them).
2140 */
2141
2142 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2143 for_each_ring(ring, dev_priv, i)
2144 wake_up_all(&ring->irq_queue);
2145
2146 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2147 wake_up_all(&dev_priv->pending_flip_queue);
2148
2149 /*
2150 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2151 * reset state is cleared.
2152 */
2153 if (reset_completed)
2154 wake_up_all(&dev_priv->gpu_error.reset_queue);
2155}
2156
Jesse Barnes8a905232009-07-11 16:48:03 -04002157/**
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002158 * i915_reset_and_wakeup - do process context error handling work
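 * @dev: drm device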
Jesse Barnes8a905232009-07-11 16:48:03 -04002159 *
2160 * Fire an error uevent so userspace can see that a hang or error
2161 * was detected.
2162 */
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002163static void i915_reset_and_wakeup(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04002164{
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002165 struct drm_i915_private *dev_priv = to_i915(dev);
2166 struct i915_gpu_error *error = &dev_priv->gpu_error;
Ben Widawskycce723e2013-07-19 09:16:42 -07002167 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2168 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2169 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetter17e1df02013-09-08 21:57:13 +02002170 int ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04002171
Dave Airlie5bdebb12013-10-11 14:07:25 +10002172 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002173
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002174 /*
2175 * Note that there's only one work item which does gpu resets, so we
2176 * need not worry about concurrent gpu resets potentially incrementing
2177 * error->reset_counter twice. We only need to take care of another
2178 * racing irq/hangcheck declaring the gpu dead for a second time. A
2179 * quick check for that is good enough: schedule_work ensures the
2180 * correct ordering between hang detection and this work item, and since
2181 * the reset in-progress bit is only ever set by code outside of this
2182 * work we don't need to worry about any other races.
2183 */
2184 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01002185 DRM_DEBUG_DRIVER("resetting chip\n");
Dave Airlie5bdebb12013-10-11 14:07:25 +10002186 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002187 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002188
Daniel Vetter17e1df02013-09-08 21:57:13 +02002189 /*
Imre Deakf454c692014-04-23 01:09:04 +03002190 * In most cases it's guaranteed that we get here with an RPM
2191 * reference held, for example because there is a pending GPU
2192 * request that won't finish until the reset is done. This
2193 * isn't the case at least when we get here by doing a
 2194 * simulated reset via debugfs, so get an RPM reference.
2195 */
2196 intel_runtime_pm_get(dev_priv);
Ville Syrjälä75147472014-11-24 18:28:11 +02002197
2198 intel_prepare_reset(dev);
2199
Imre Deakf454c692014-04-23 01:09:04 +03002200 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002201 * All state reset _must_ be completed before we update the
2202 * reset counter, for otherwise waiters might miss the reset
2203 * pending state and not properly drop locks, resulting in
2204 * deadlocks with the reset work.
2205 */
Daniel Vetterf69061b2012-12-06 09:01:42 +01002206 ret = i915_reset(dev);
2207
Ville Syrjälä75147472014-11-24 18:28:11 +02002208 intel_finish_reset(dev);
Daniel Vetter17e1df02013-09-08 21:57:13 +02002209
Imre Deakf454c692014-04-23 01:09:04 +03002210 intel_runtime_pm_put(dev_priv);
2211
Daniel Vetterf69061b2012-12-06 09:01:42 +01002212 if (ret == 0) {
2213 /*
2214 * After all the gem state is reset, increment the reset
2215 * counter and wake up everyone waiting for the reset to
2216 * complete.
2217 *
2218 * Since unlock operations are a one-sided barrier only,
2219 * we need to insert a barrier here to order any seqno
2220 * updates before
2221 * the counter increment.
2222 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002223 smp_mb__before_atomic();
Daniel Vetterf69061b2012-12-06 09:01:42 +01002224 atomic_inc(&dev_priv->gpu_error.reset_counter);
2225
Dave Airlie5bdebb12013-10-11 14:07:25 +10002226 kobject_uevent_env(&dev->primary->kdev->kobj,
Daniel Vetterf69061b2012-12-06 09:01:42 +01002227 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002228 } else {
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02002229 atomic_set_mask(I915_WEDGED, &error->reset_counter);
Ben Gamarif316a422009-09-14 17:48:46 -04002230 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002231
Daniel Vetter17e1df02013-09-08 21:57:13 +02002232 /*
2233 * Note: The wake_up also serves as a memory barrier so that
2234 * waiters see the update value of the reset counter atomic_t.
2235 */
2236 i915_error_wake_up(dev_priv, true);
Ben Gamarif316a422009-09-14 17:48:46 -04002237 }
Jesse Barnes8a905232009-07-11 16:48:03 -04002238}
2239
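/*
 * Dump the error identity registers (EIR, IPEIR, PGTBL_ER, INSTDONE, ...)
 * to the log and clear them; bits that refuse to clear are masked via EMR.
 */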
Chris Wilson35aed2e2010-05-27 13:18:12 +01002240static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04002241{
2242 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07002243 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04002244 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07002245 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04002246
Chris Wilson35aed2e2010-05-27 13:18:12 +01002247 if (!eir)
2248 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04002249
Joe Perchesa70491c2012-03-18 13:00:11 -07002250 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002251
Ben Widawskybd9854f2012-08-23 15:18:09 -07002252 i915_get_extra_instdone(dev, instdone);
2253
Jesse Barnes8a905232009-07-11 16:48:03 -04002254 if (IS_G4X(dev)) {
2255 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2256 u32 ipeir = I915_READ(IPEIR_I965);
2257
Joe Perchesa70491c2012-03-18 13:00:11 -07002258 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2259 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002260 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2261 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002262 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002263 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002264 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002265 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002266 }
2267 if (eir & GM45_ERROR_PAGE_TABLE) {
2268 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002269 pr_err("page table error\n");
2270 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002271 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002272 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002273 }
2274 }
2275
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002276 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002277 if (eir & I915_ERROR_PAGE_TABLE) {
2278 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002279 pr_err("page table error\n");
2280 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002281 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002282 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002283 }
2284 }
2285
2286 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002287 pr_err("memory refresh error:\n");
Damien Lespiau055e3932014-08-18 13:49:10 +01002288 for_each_pipe(dev_priv, pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002289 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002290 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002291 /* pipestat has already been acked */
2292 }
2293 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002294 pr_err("instruction error\n");
2295 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002296 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2297 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002298 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002299 u32 ipeir = I915_READ(IPEIR);
2300
Joe Perchesa70491c2012-03-18 13:00:11 -07002301 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2302 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002303 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002304 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002305 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002306 } else {
2307 u32 ipeir = I915_READ(IPEIR_I965);
2308
Joe Perchesa70491c2012-03-18 13:00:11 -07002309 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2310 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002311 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002312 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002313 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002314 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002315 }
2316 }
2317
2318 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002319 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002320 eir = I915_READ(EIR);
2321 if (eir) {
2322 /*
2323 * some errors might have become stuck,
2324 * mask them.
2325 */
2326 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2327 I915_WRITE(EMR, I915_READ(EMR) | eir);
2328 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2329 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002330}
2331
2332/**
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002333 * i915_handle_error - handle a gpu error
Chris Wilson35aed2e2010-05-27 13:18:12 +01002334 * @dev: drm device
2335 *
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002336 * Do some basic checking of register state at error time and
Chris Wilson35aed2e2010-05-27 13:18:12 +01002337 * dump it to the syslog. Also call i915_capture_error_state() to make
2338 * sure we get a record and make it available in debugfs. Fire a uevent
2339 * so userspace knows something bad happened (should trigger collection
2340 * of a ring dump etc.).
2341 */
Mika Kuoppala58174462014-02-25 17:11:26 +02002342void i915_handle_error(struct drm_device *dev, bool wedged,
2343 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002344{
2345 struct drm_i915_private *dev_priv = dev->dev_private;
Mika Kuoppala58174462014-02-25 17:11:26 +02002346 va_list args;
2347 char error_msg[80];
Chris Wilson35aed2e2010-05-27 13:18:12 +01002348
Mika Kuoppala58174462014-02-25 17:11:26 +02002349 va_start(args, fmt);
2350 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2351 va_end(args);
2352
2353 i915_capture_error_state(dev, wedged, error_msg);
Chris Wilson35aed2e2010-05-27 13:18:12 +01002354 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002355
Ben Gamariba1234d2009-09-14 17:48:47 -04002356 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01002357 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2358 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002359
Ben Gamari11ed50e2009-09-14 17:48:45 -04002360 /*
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002361	 * Wake up waiting processes so that the reset function
2362 * i915_reset_and_wakeup doesn't deadlock trying to grab
2363 * various locks. By bumping the reset counter first, the woken
Daniel Vetter17e1df02013-09-08 21:57:13 +02002364 * processes will see a reset in progress and back off,
2365	 * releasing their locks and then waiting for the reset completion.
2366 * We must do this for _all_ gpu waiters that might hold locks
2367 * that the reset work needs to acquire.
2368 *
2369 * Note: The wake_up serves as the required memory barrier to
2370 * ensure that the waiters see the updated value of the reset
2371 * counter atomic_t.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002372 */
Daniel Vetter17e1df02013-09-08 21:57:13 +02002373 i915_error_wake_up(dev_priv, false);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002374 }
2375
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002376 i915_reset_and_wakeup(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002377}
2378
Keith Packard42f52ef2008-10-18 19:39:29 -07002379/* Called from drm generic code, passed 'crtc' which
2380 * we use as a pipe index
2381 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002382static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002383{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002384 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002385 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002386
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002387 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002388 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002389 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002390 PIPE_START_VBLANK_INTERRUPT_STATUS);
Keith Packarde9d21d72008-10-16 11:31:38 -07002391 else
Keith Packard7c463582008-11-04 02:03:27 -08002392 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002393 PIPE_VBLANK_INTERRUPT_STATUS);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002394 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002395
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002396 return 0;
2397}
2398
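/* ILK through gen7 unmask the per-pipe vblank bit in DEIMR; gen7 parts use
 * the IVB bit layout (DE_PIPE_VBLANK_IVB) instead of DE_PIPE_VBLANK. */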
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002399static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002400{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002401 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002402 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002403 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c22013-10-21 18:04:36 +02002404 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002405
Jesse Barnesf796cf82011-04-07 13:58:17 -07002406 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002407 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002408 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2409
2410 return 0;
2411}
2412
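/* VLV routes vblank through PIPESTAT, so enable the start-of-vblank status
 * bit rather than touching DEIMR. */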
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002413static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2414{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002415 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002416 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002417
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002418 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002419 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002420 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002421 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2422
2423 return 0;
2424}
2425
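/* Gen8+ keeps a per-pipe interrupt mask register: clear GEN8_PIPE_VBLANK in
 * the cached mask and write it back to unmask the vblank interrupt. */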
Ben Widawskyabd58f02013-11-02 21:07:09 -07002426static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2427{
2428 struct drm_i915_private *dev_priv = dev->dev_private;
2429 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002430
Ben Widawskyabd58f02013-11-02 21:07:09 -07002431 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002432 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2433 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2434 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002435 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2436 return 0;
2437}
2438
Keith Packard42f52ef2008-10-18 19:39:29 -07002439/* Called from drm generic code, passed 'crtc' which
2440 * we use as a pipe index
2441 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002442static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002443{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002444 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002445 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002446
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002447 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002448 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002449 PIPE_VBLANK_INTERRUPT_STATUS |
2450 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002451 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2452}
2453
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002454static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002455{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002456 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002457 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002458 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c22013-10-21 18:04:36 +02002459 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002460
2461 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002462 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002463 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2464}
2465
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002466static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2467{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002468 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002469 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002470
2471 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002472 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002473 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002474 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2475}
2476
Ben Widawskyabd58f02013-11-02 21:07:09 -07002477static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2478{
2479 struct drm_i915_private *dev_priv = dev->dev_private;
2480 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002481
Ben Widawskyabd58f02013-11-02 21:07:09 -07002482 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002483 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2484 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2485 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002486 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2487}
2488
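/* A ring is idle when it has no queued requests, or the last submitted
 * seqno has already completed. */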
Chris Wilson9107e9d2013-06-10 11:20:20 +01002489static bool
Tomas Elf94f7bbe2015-07-09 15:30:57 +01002490ring_idle(struct intel_engine_cs *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002491{
Chris Wilson9107e9d2013-06-10 11:20:20 +01002492 return (list_empty(&ring->request_list) ||
Tomas Elf94f7bbe2015-07-09 15:30:57 +01002493 i915_seqno_passed(seqno, ring->last_submitted_seqno));
Ben Gamarif65d9422009-09-14 17:48:44 -04002494}
2495
Daniel Vettera028c4b2014-03-15 00:08:56 +01002496static bool
2497ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2498{
2499 if (INTEL_INFO(dev)->gen >= 8) {
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002500 return (ipehr >> 23) == 0x1c;
Daniel Vettera028c4b2014-03-15 00:08:56 +01002501 } else {
2502 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2503 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2504 MI_SEMAPHORE_REGISTER);
2505 }
2506}
2507
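/* Map a semaphore wait back to the ring expected to signal it: gen8+
 * matches on the signal GGTT offset, gen6/7 on the sync mbox bits. */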
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002508static struct intel_engine_cs *
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002509semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
Daniel Vetter921d42e2014-03-18 10:26:04 +01002510{
2511 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002512 struct intel_engine_cs *signaller;
Daniel Vetter921d42e2014-03-18 10:26:04 +01002513 int i;
2514
2515 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002516 for_each_ring(signaller, dev_priv, i) {
2517 if (ring == signaller)
2518 continue;
2519
2520 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2521 return signaller;
2522 }
Daniel Vetter921d42e2014-03-18 10:26:04 +01002523 } else {
2524 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2525
2526 for_each_ring(signaller, dev_priv, i) {
2527			if (ring == signaller)
2528 continue;
2529
Ben Widawskyebc348b2014-04-29 14:52:28 -07002530 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
Daniel Vetter921d42e2014-03-18 10:26:04 +01002531 return signaller;
2532 }
2533 }
2534
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002535 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2536 ring->id, ipehr, offset);
Daniel Vetter921d42e2014-03-18 10:26:04 +01002537
2538 return NULL;
2539}
2540
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002541static struct intel_engine_cs *
2542semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002543{
2544 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002545 u32 cmd, ipehr, head;
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002546 u64 offset = 0;
2547 int i, backwards;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002548
2549 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
Daniel Vettera028c4b2014-03-15 00:08:56 +01002550 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
Chris Wilson6274f212013-06-10 11:20:21 +01002551 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002552
Daniel Vetter88fe4292014-03-15 00:08:55 +01002553 /*
2554 * HEAD is likely pointing to the dword after the actual command,
2555 * so scan backwards until we find the MBOX. But limit it to just 3
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002556 * or 4 dwords depending on the semaphore wait command size.
2557 * Note that we don't care about ACTHD here since that might
Daniel Vetter88fe4292014-03-15 00:08:55 +01002558	 * point at a batch, and semaphores are always emitted into the
2559 * ringbuffer itself.
Chris Wilsona24a11e2013-03-14 17:52:05 +02002560 */
Daniel Vetter88fe4292014-03-15 00:08:55 +01002561 head = I915_READ_HEAD(ring) & HEAD_ADDR;
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002562 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002563
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002564 for (i = backwards; i; --i) {
Daniel Vetter88fe4292014-03-15 00:08:55 +01002565 /*
2566 * Be paranoid and presume the hw has gone off into the wild -
2567 * our ring is smaller than what the hardware (and hence
2568 * HEAD_ADDR) allows. Also handles wrap-around.
2569 */
Oscar Mateoee1b1e52014-05-22 14:13:35 +01002570 head &= ring->buffer->size - 1;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002571
2572 /* This here seems to blow up */
Oscar Mateoee1b1e52014-05-22 14:13:35 +01002573 cmd = ioread32(ring->buffer->virtual_start + head);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002574 if (cmd == ipehr)
2575 break;
2576
Daniel Vetter88fe4292014-03-15 00:08:55 +01002577 head -= 4;
2578 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002579
Daniel Vetter88fe4292014-03-15 00:08:55 +01002580 if (!i)
2581 return NULL;
2582
Oscar Mateoee1b1e52014-05-22 14:13:35 +01002583 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002584 if (INTEL_INFO(ring->dev)->gen >= 8) {
2585 offset = ioread32(ring->buffer->virtual_start + head + 12);
2586 offset <<= 32;
2587		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2588 }
2589 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002590}
2591
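/* Returns 1 if the ring we are waiting on has already signalled, 0 if it is
 * still busy, and -1 if no signaller was found or a deadlock is suspected. */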
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002592static int semaphore_passed(struct intel_engine_cs *ring)
Chris Wilson6274f212013-06-10 11:20:21 +01002593{
2594 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002595 struct intel_engine_cs *signaller;
Chris Wilsona0d036b2014-07-19 12:40:42 +01002596 u32 seqno;
Chris Wilson6274f212013-06-10 11:20:21 +01002597
Chris Wilson4be17382014-06-06 10:22:29 +01002598 ring->hangcheck.deadlock++;
Chris Wilson6274f212013-06-10 11:20:21 +01002599
2600 signaller = semaphore_waits_for(ring, &seqno);
Chris Wilson4be17382014-06-06 10:22:29 +01002601 if (signaller == NULL)
2602 return -1;
2603
2604 /* Prevent pathological recursion due to driver bugs */
2605 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
Chris Wilson6274f212013-06-10 11:20:21 +01002606 return -1;
2607
Chris Wilson4be17382014-06-06 10:22:29 +01002608 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2609 return 1;
2610
Chris Wilsona0d036b2014-07-19 12:40:42 +01002611 /* cursory check for an unkickable deadlock */
2612 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2613 semaphore_passed(signaller) < 0)
Chris Wilson4be17382014-06-06 10:22:29 +01002614 return -1;
2615
2616 return 0;
Chris Wilson6274f212013-06-10 11:20:21 +01002617}
2618
2619static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2620{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002621 struct intel_engine_cs *ring;
Chris Wilson6274f212013-06-10 11:20:21 +01002622 int i;
2623
2624 for_each_ring(ring, dev_priv, i)
Chris Wilson4be17382014-06-06 10:22:29 +01002625 ring->hangcheck.deadlock = 0;
Chris Wilson6274f212013-06-10 11:20:21 +01002626}
2627
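/* Classify why a ring whose seqno has not advanced appears stuck: still
 * making ACTHD progress, hung on a kickable WAIT/semaphore, or truly hung. */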
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002628static enum intel_ring_hangcheck_action
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002629ring_stuck(struct intel_engine_cs *ring, u64 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002630{
2631 struct drm_device *dev = ring->dev;
2632 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002633 u32 tmp;
2634
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03002635 if (acthd != ring->hangcheck.acthd) {
2636 if (acthd > ring->hangcheck.max_acthd) {
2637 ring->hangcheck.max_acthd = acthd;
2638 return HANGCHECK_ACTIVE;
2639 }
2640
2641 return HANGCHECK_ACTIVE_LOOP;
2642 }
Chris Wilson6274f212013-06-10 11:20:21 +01002643
Chris Wilson9107e9d2013-06-10 11:20:20 +01002644 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002645 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002646
2647 /* Is the chip hanging on a WAIT_FOR_EVENT?
2648 * If so we can simply poke the RB_WAIT bit
2649 * and break the hang. This should work on
2650 * all but the second generation chipsets.
2651 */
2652 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002653 if (tmp & RING_WAIT) {
Mika Kuoppala58174462014-02-25 17:11:26 +02002654 i915_handle_error(dev, false,
2655 "Kicking stuck wait on %s",
2656 ring->name);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002657 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002658 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002659 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002660
Chris Wilson6274f212013-06-10 11:20:21 +01002661 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2662 switch (semaphore_passed(ring)) {
2663 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002664 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002665 case 1:
Mika Kuoppala58174462014-02-25 17:11:26 +02002666 i915_handle_error(dev, false,
2667 "Kicking stuck semaphore on %s",
2668 ring->name);
Chris Wilson6274f212013-06-10 11:20:21 +01002669 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002670 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002671 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002672 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01002673 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002674 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002675
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002676 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002677}
2678
Chris Wilson737b1502015-01-26 18:03:03 +02002679/*
Ben Gamarif65d9422009-09-14 17:48:44 -04002680 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002681 * batchbuffers in a long time. We keep track of seqno progress per ring, and
2682 * if there is no progress the hangcheck score for that ring is increased.
2683 * Further, acthd is inspected to see if the ring is stuck. If the ring is
2684 * stuck, we kick it. If we see no progress on three subsequent calls
2685 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04002686 */
Chris Wilson737b1502015-01-26 18:03:03 +02002687static void i915_hangcheck_elapsed(struct work_struct *work)
Ben Gamarif65d9422009-09-14 17:48:44 -04002688{
Chris Wilson737b1502015-01-26 18:03:03 +02002689 struct drm_i915_private *dev_priv =
2690 container_of(work, typeof(*dev_priv),
2691 gpu_error.hangcheck_work.work);
2692 struct drm_device *dev = dev_priv->dev;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002693 struct intel_engine_cs *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01002694 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002695 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002696 bool stuck[I915_NUM_RINGS] = { 0 };
2697#define BUSY 1
2698#define KICK 5
2699#define HUNG 20
Chris Wilson893eead2010-10-27 14:44:35 +01002700
Jani Nikulad330a952014-01-21 11:24:25 +02002701 if (!i915.enable_hangcheck)
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002702 return;
2703
Chris Wilsonb4519512012-05-11 14:29:30 +01002704 for_each_ring(ring, dev_priv, i) {
Chris Wilson50877442014-03-21 12:41:53 +00002705 u64 acthd;
2706 u32 seqno;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002707 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002708
Chris Wilson6274f212013-06-10 11:20:21 +01002709 semaphore_clear_deadlocks(dev_priv);
2710
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002711 seqno = ring->get_seqno(ring, false);
2712 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002713
Chris Wilson9107e9d2013-06-10 11:20:20 +01002714 if (ring->hangcheck.seqno == seqno) {
Tomas Elf94f7bbe2015-07-09 15:30:57 +01002715 if (ring_idle(ring, seqno)) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002716 ring->hangcheck.action = HANGCHECK_IDLE;
2717
Chris Wilson9107e9d2013-06-10 11:20:20 +01002718 if (waitqueue_active(&ring->irq_queue)) {
2719 /* Issue a wake-up to catch stuck h/w. */
Chris Wilson094f9a52013-09-25 17:34:55 +01002720 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
Daniel Vetterf4adcd22013-10-28 09:24:13 +01002721 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2722 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2723 ring->name);
2724 else
2725 DRM_INFO("Fake missed irq on %s\n",
2726 ring->name);
Chris Wilson094f9a52013-09-25 17:34:55 +01002727 wake_up_all(&ring->irq_queue);
2728 }
2729 /* Safeguard against driver failure */
2730 ring->hangcheck.score += BUSY;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002731 } else
2732 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002733 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01002734 /* We always increment the hangcheck score
2735 * if the ring is busy and still processing
2736 * the same request, so that no single request
2737 * can run indefinitely (such as a chain of
2738 * batches). The only time we do not increment
2739			 * the hangcheck score on this ring is if this
2740 * ring is in a legitimate wait for another
2741 * ring. In that case the waiting ring is a
2742 * victim and we want to be sure we catch the
2743 * right culprit. Then every time we do kick
2744 * the ring, add a small increment to the
2745 * score so that we can catch a batch that is
2746 * being repeatedly kicked and so responsible
2747 * for stalling the machine.
2748 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002749 ring->hangcheck.action = ring_stuck(ring,
2750 acthd);
2751
2752 switch (ring->hangcheck.action) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002753 case HANGCHECK_IDLE:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002754 case HANGCHECK_WAIT:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002755 case HANGCHECK_ACTIVE:
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03002756 break;
2757 case HANGCHECK_ACTIVE_LOOP:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002758 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01002759 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002760 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002761 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002762 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002763 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002764 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002765 stuck[i] = true;
2766 break;
2767 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002768 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002769 } else {
Mika Kuoppalada661462013-09-06 16:03:28 +03002770 ring->hangcheck.action = HANGCHECK_ACTIVE;
2771
Chris Wilson9107e9d2013-06-10 11:20:20 +01002772 /* Gradually reduce the count so that we catch DoS
2773 * attempts across multiple batches.
2774 */
2775 if (ring->hangcheck.score > 0)
2776 ring->hangcheck.score--;
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03002777
2778 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002779 }
2780
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002781 ring->hangcheck.seqno = seqno;
2782 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002783 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002784 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002785
Mika Kuoppala92cab732013-05-24 17:16:07 +03002786 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002787 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
Daniel Vetterb8d88d12013-08-28 10:57:59 +02002788 DRM_INFO("%s on %s\n",
2789 stuck[i] ? "stuck" : "no progress",
2790 ring->name);
Chris Wilsona43adf02013-06-10 11:20:22 +01002791 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002792 }
2793 }
2794
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002795 if (rings_hung)
Mika Kuoppala58174462014-02-25 17:11:26 +02002796 return i915_handle_error(dev, true, "Ring hung");
Ben Gamarif65d9422009-09-14 17:48:44 -04002797
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002798 if (busy_count)
2799 /* Reset timer case chip hangs without another request
2800 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002801 i915_queue_hangcheck(dev);
2802}
2803
2804void i915_queue_hangcheck(struct drm_device *dev)
2805{
Chris Wilson737b1502015-01-26 18:03:03 +02002806 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
Chris Wilson672e7b72014-11-19 09:47:19 +00002807
Jani Nikulad330a952014-01-21 11:24:25 +02002808 if (!i915.enable_hangcheck)
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002809 return;
2810
Chris Wilson737b1502015-01-26 18:03:03 +02002811 /* Don't continually defer the hangcheck so that it is always run at
2812 * least once after work has been scheduled on any ring. Otherwise,
2813 * we will ignore a hung ring if a second ring is kept busy.
2814 */
2815
2816 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2817 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002818}
2819
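/* Reset the south display engine (PCH) interrupt registers; PCH-NOP
 * systems have nothing to do here. */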
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03002820static void ibx_irq_reset(struct drm_device *dev)
Paulo Zanoni91738a92013-06-05 14:21:51 -03002821{
2822 struct drm_i915_private *dev_priv = dev->dev_private;
2823
2824 if (HAS_PCH_NOP(dev))
2825 return;
2826
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002827 GEN5_IRQ_RESET(SDE);
Paulo Zanoni105b1222014-04-01 15:37:17 -03002828
2829 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2830 I915_WRITE(SERR_INT, 0xffffffff);
Paulo Zanoni622364b2014-04-01 15:37:22 -03002831}
Paulo Zanoni105b1222014-04-01 15:37:17 -03002832
Paulo Zanoni622364b2014-04-01 15:37:22 -03002833/*
2834 * SDEIER is also touched by the interrupt handler to work around missed PCH
2835 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2836 * instead we unconditionally enable all PCH interrupt sources here, but then
2837 * only unmask them as needed with SDEIMR.
2838 *
2839 * This function needs to be called before interrupts are enabled.
2840 */
2841static void ibx_irq_pre_postinstall(struct drm_device *dev)
2842{
2843 struct drm_i915_private *dev_priv = dev->dev_private;
2844
2845 if (HAS_PCH_NOP(dev))
2846 return;
2847
2848 WARN_ON(I915_READ(SDEIER) != 0);
Paulo Zanoni91738a92013-06-05 14:21:51 -03002849 I915_WRITE(SDEIER, 0xffffffff);
2850 POSTING_READ(SDEIER);
2851}
2852
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03002853static void gen5_gt_irq_reset(struct drm_device *dev)
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002854{
2855 struct drm_i915_private *dev_priv = dev->dev_private;
2856
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002857 GEN5_IRQ_RESET(GT);
Paulo Zanonia9d356a2014-04-01 15:37:09 -03002858 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002859 GEN5_IRQ_RESET(GEN6_PM);
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002860}
2861
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862/* drm_dma.h hooks
2863*/
Paulo Zanonibe30b292014-04-01 15:37:25 -03002864static void ironlake_irq_reset(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002865{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002866 struct drm_i915_private *dev_priv = dev->dev_private;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002867
Paulo Zanoni0c841212014-04-01 15:37:27 -03002868 I915_WRITE(HWSTAM, 0xffffffff);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002869
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002870 GEN5_IRQ_RESET(DE);
Paulo Zanonic6d954c2014-04-01 15:37:18 -03002871 if (IS_GEN7(dev))
2872 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002873
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03002874 gen5_gt_irq_reset(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002875
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03002876 ibx_irq_reset(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002877}
2878
Ville Syrjälä70591a42014-10-30 19:42:58 +02002879static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2880{
2881 enum pipe pipe;
2882
2883 I915_WRITE(PORT_HOTPLUG_EN, 0);
2884 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2885
2886 for_each_pipe(dev_priv, pipe)
2887 I915_WRITE(PIPESTAT(pipe), 0xffff);
2888
2889 GEN5_IRQ_RESET(VLV_);
2890}
2891
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002892static void valleyview_irq_preinstall(struct drm_device *dev)
2893{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002894 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002895
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002896 /* VLV magic */
2897 I915_WRITE(VLV_IMR, 0);
2898 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2899 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2900 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2901
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03002902 gen5_gt_irq_reset(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002903
Ville Syrjälä7c4cde32014-10-30 19:42:51 +02002904 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002905
Ville Syrjälä70591a42014-10-30 19:42:58 +02002906 vlv_display_irq_reset(dev_priv);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002907}
2908
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02002909static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2910{
2911 GEN8_IRQ_RESET_NDX(GT, 0);
2912 GEN8_IRQ_RESET_NDX(GT, 1);
2913 GEN8_IRQ_RESET_NDX(GT, 2);
2914 GEN8_IRQ_RESET_NDX(GT, 3);
2915}
2916
Paulo Zanoni823f6b32014-04-01 15:37:26 -03002917static void gen8_irq_reset(struct drm_device *dev)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002918{
2919 struct drm_i915_private *dev_priv = dev->dev_private;
2920 int pipe;
2921
Ben Widawskyabd58f02013-11-02 21:07:09 -07002922 I915_WRITE(GEN8_MASTER_IRQ, 0);
2923 POSTING_READ(GEN8_MASTER_IRQ);
2924
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02002925 gen8_gt_irq_reset(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002926
Damien Lespiau055e3932014-08-18 13:49:10 +01002927 for_each_pipe(dev_priv, pipe)
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002928 if (intel_display_power_is_enabled(dev_priv,
2929 POWER_DOMAIN_PIPE(pipe)))
Paulo Zanoni813bde42014-07-04 11:50:29 -03002930 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002931
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002932 GEN5_IRQ_RESET(GEN8_DE_PORT_);
2933 GEN5_IRQ_RESET(GEN8_DE_MISC_);
2934 GEN5_IRQ_RESET(GEN8_PCU_);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002935
Shashank Sharma266ea3d2014-08-22 17:40:42 +05302936 if (HAS_PCH_SPLIT(dev))
2937 ibx_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002938}
Ben Widawskyabd58f02013-11-02 21:07:09 -07002939
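/* Called after a display power well comes up: program the DE pipe interrupt
 * registers for the pipes in pipe_mask, which the IRQ reset code skips while
 * their power domain is disabled. */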
Damien Lespiau4c6c03b2015-03-06 18:50:48 +00002940void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2941 unsigned int pipe_mask)
Paulo Zanonid49bdb02014-07-04 11:50:31 -03002942{
Paulo Zanoni1180e202014-10-07 18:02:52 -03002943 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
Paulo Zanonid49bdb02014-07-04 11:50:31 -03002944
Daniel Vetter13321782014-09-15 14:55:29 +02002945 spin_lock_irq(&dev_priv->irq_lock);
Damien Lespiaud14c0342015-03-06 18:50:51 +00002946 if (pipe_mask & 1 << PIPE_A)
2947 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
2948 dev_priv->de_irq_mask[PIPE_A],
2949 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
Damien Lespiau4c6c03b2015-03-06 18:50:48 +00002950 if (pipe_mask & 1 << PIPE_B)
2951 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
2952 dev_priv->de_irq_mask[PIPE_B],
2953 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
2954 if (pipe_mask & 1 << PIPE_C)
2955 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
2956 dev_priv->de_irq_mask[PIPE_C],
2957 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
Daniel Vetter13321782014-09-15 14:55:29 +02002958 spin_unlock_irq(&dev_priv->irq_lock);
Paulo Zanonid49bdb02014-07-04 11:50:31 -03002959}
2960
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002961static void cherryview_irq_preinstall(struct drm_device *dev)
2962{
2963 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002964
2965 I915_WRITE(GEN8_MASTER_IRQ, 0);
2966 POSTING_READ(GEN8_MASTER_IRQ);
2967
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02002968 gen8_gt_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002969
2970 GEN5_IRQ_RESET(GEN8_PCU_);
2971
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002972 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2973
Ville Syrjälä70591a42014-10-30 19:42:58 +02002974 vlv_display_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002975}
2976
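/* Build the PCH hotplug interrupt mask from the encoders whose pins are
 * marked HPD_ENABLED, then program the hotplug detect pulse durations. */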
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002977static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002978{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002979 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002980 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02002981 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07002982
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002983 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002984 hotplug_irqs = SDE_HOTPLUG_MASK;
Damien Lespiaub2784e12014-08-05 11:29:37 +01002985 for_each_intel_encoder(dev, intel_encoder)
Jani Nikula5fcece82015-05-27 15:03:42 +03002986 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002987 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002988 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002989 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Damien Lespiaub2784e12014-08-05 11:29:37 +01002990 for_each_intel_encoder(dev, intel_encoder)
Jani Nikula5fcece82015-05-27 15:03:42 +03002991 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002992 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002993 }
2994
Daniel Vetterfee884e2013-07-04 23:35:21 +02002995 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002996
2997 /*
2998 * Enable digital hotplug on the PCH, and configure the DP short pulse
2999 * duration to 2ms (which is the minimum in the Display Port spec)
3000 *
3001 * This register is the same on all known PCH chips.
3002 */
Keith Packard7fe0b972011-09-19 13:31:02 -07003003 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3004 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3005 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3006 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3007 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3008 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3009}
3010
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003011static void bxt_hpd_irq_setup(struct drm_device *dev)
3012{
3013 struct drm_i915_private *dev_priv = dev->dev_private;
3014 struct intel_encoder *intel_encoder;
3015 u32 hotplug_port = 0;
3016 u32 hotplug_ctrl;
3017
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003018 for_each_intel_encoder(dev, intel_encoder) {
Jani Nikula5fcece82015-05-27 15:03:42 +03003019 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003020 == HPD_ENABLED)
3021 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
3022 }
3023
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003024 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3025
Sonika Jindal7f3561b2015-08-10 10:35:35 +05303026 if (hotplug_port & BXT_DE_PORT_HP_DDIA)
3027 hotplug_ctrl |= BXT_DDIA_HPD_ENABLE;
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003028 if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3029 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3030 if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3031 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3032 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3033
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003034 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3035 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3036
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003037 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3038 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3039 POSTING_READ(GEN8_DE_PORT_IER);
3040}
3041
Paulo Zanonid46da432013-02-08 17:35:15 -02003042static void ibx_irq_postinstall(struct drm_device *dev)
3043{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003044 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003045 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02003046
Daniel Vetter692a04c2013-05-29 21:43:05 +02003047 if (HAS_PCH_NOP(dev))
3048 return;
3049
Paulo Zanoni105b1222014-04-01 15:37:17 -03003050 if (HAS_PCH_IBX(dev))
Daniel Vetter5c673b62014-03-07 20:34:46 +01003051 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
Paulo Zanoni105b1222014-04-01 15:37:17 -03003052 else
Daniel Vetter5c673b62014-03-07 20:34:46 +01003053 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
Paulo Zanoni86642812013-04-12 17:57:57 -03003054
Paulo Zanoni337ba012014-04-01 15:37:16 -03003055 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
Paulo Zanonid46da432013-02-08 17:35:15 -02003056 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02003057}
3058
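/* Unmask the GT user interrupts (and L3 parity where present); PM interrupts
 * stay masked here and are enabled on demand by the RPS code. */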
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003059static void gen5_gt_irq_postinstall(struct drm_device *dev)
3060{
3061 struct drm_i915_private *dev_priv = dev->dev_private;
3062 u32 pm_irqs, gt_irqs;
3063
3064 pm_irqs = gt_irqs = 0;
3065
3066 dev_priv->gt_irq_mask = ~0;
Ben Widawsky040d2ba2013-09-19 11:01:40 -07003067 if (HAS_L3_DPF(dev)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003068 /* L3 parity interrupt is always unmasked. */
Ben Widawsky35a85ac2013-09-19 11:13:41 -07003069 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3070 gt_irqs |= GT_PARITY_ERROR(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003071 }
3072
3073 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3074 if (IS_GEN5(dev)) {
3075 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3076 ILK_BSD_USER_INTERRUPT;
3077 } else {
3078 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3079 }
3080
Paulo Zanoni35079892014-04-01 15:37:15 -03003081 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003082
3083 if (INTEL_INFO(dev)->gen >= 6) {
Imre Deak78e68d32014-12-15 18:59:27 +02003084 /*
3085 * RPS interrupts will get enabled/disabled on demand when RPS
3086 * itself is enabled/disabled.
3087 */
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003088 if (HAS_VEBOX(dev))
3089 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3090
Paulo Zanoni605cd252013-08-06 18:57:15 -03003091 dev_priv->pm_irq_mask = 0xffffffff;
Paulo Zanoni35079892014-04-01 15:37:15 -03003092 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003093 }
3094}
3095
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003096static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003097{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003098 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003099 u32 display_mask, extra_mask;
3100
3101 if (INTEL_INFO(dev)->gen >= 7) {
3102 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3103 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3104 DE_PLANEB_FLIP_DONE_IVB |
Daniel Vetter5c673b62014-03-07 20:34:46 +01003105 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003106 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
Daniel Vetter5c673b62014-03-07 20:34:46 +01003107 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003108 } else {
3109 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3110 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003111 DE_AUX_CHANNEL_A |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003112 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3113 DE_POISON);
Daniel Vetter5c673b62014-03-07 20:34:46 +01003114 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3115 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003116 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003117
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003118 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003119
Paulo Zanoni0c841212014-04-01 15:37:27 -03003120 I915_WRITE(HWSTAM, 0xeffe);
3121
Paulo Zanoni622364b2014-04-01 15:37:22 -03003122 ibx_irq_pre_postinstall(dev);
3123
Paulo Zanoni35079892014-04-01 15:37:15 -03003124 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003125
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003126 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003127
Paulo Zanonid46da432013-02-08 17:35:15 -02003128 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07003129
Jesse Barnesf97108d2010-01-29 11:27:07 -08003130 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02003131 /* Enable PCU event interrupts
3132 *
3133 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003134 * setup is guaranteed to run in single-threaded context. But we
3135 * need it to make the assert_spin_locked happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003136 spin_lock_irq(&dev_priv->irq_lock);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003137 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetterd6207432014-09-15 14:55:27 +02003138 spin_unlock_irq(&dev_priv->irq_lock);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003139 }
3140
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003141 return 0;
3142}
3143
Imre Deakf8b79e52014-03-04 19:23:07 +02003144static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3145{
3146 u32 pipestat_mask;
3147 u32 iir_mask;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003148 enum pipe pipe;
Imre Deakf8b79e52014-03-04 19:23:07 +02003149
3150 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3151 PIPE_FIFO_UNDERRUN_STATUS;
3152
Ville Syrjälä120dda42014-10-30 19:42:57 +02003153 for_each_pipe(dev_priv, pipe)
3154 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003155 POSTING_READ(PIPESTAT(PIPE_A));
3156
3157 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3158 PIPE_CRC_DONE_INTERRUPT_STATUS;
3159
Ville Syrjälä120dda42014-10-30 19:42:57 +02003160 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3161 for_each_pipe(dev_priv, pipe)
3162 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003163
3164 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3165 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3166 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003167 if (IS_CHERRYVIEW(dev_priv))
3168 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
Imre Deakf8b79e52014-03-04 19:23:07 +02003169 dev_priv->irq_mask &= ~iir_mask;
3170
3171 I915_WRITE(VLV_IIR, iir_mask);
3172 I915_WRITE(VLV_IIR, iir_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003173 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003174 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3175 POSTING_READ(VLV_IMR);
Imre Deakf8b79e52014-03-04 19:23:07 +02003176}
3177
3178static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3179{
3180 u32 pipestat_mask;
3181 u32 iir_mask;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003182 enum pipe pipe;
Imre Deakf8b79e52014-03-04 19:23:07 +02003183
3184 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3185 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
Imre Deak6c7fba02014-03-10 19:44:48 +02003186 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003187 if (IS_CHERRYVIEW(dev_priv))
3188 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
Imre Deakf8b79e52014-03-04 19:23:07 +02003189
3190 dev_priv->irq_mask |= iir_mask;
Imre Deakf8b79e52014-03-04 19:23:07 +02003191 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003192 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003193 I915_WRITE(VLV_IIR, iir_mask);
3194 I915_WRITE(VLV_IIR, iir_mask);
3195 POSTING_READ(VLV_IIR);
3196
3197 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3198 PIPE_CRC_DONE_INTERRUPT_STATUS;
3199
Ville Syrjälä120dda42014-10-30 19:42:57 +02003200 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3201 for_each_pipe(dev_priv, pipe)
3202 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003203
3204 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3205 PIPE_FIFO_UNDERRUN_STATUS;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003206
3207 for_each_pipe(dev_priv, pipe)
3208 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003209 POSTING_READ(PIPESTAT(PIPE_A));
3210}
3211
3212void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3213{
3214 assert_spin_locked(&dev_priv->irq_lock);
3215
3216 if (dev_priv->display_irqs_enabled)
3217 return;
3218
3219 dev_priv->display_irqs_enabled = true;
3220
Imre Deak950eaba2014-09-08 15:21:09 +03003221 if (intel_irqs_enabled(dev_priv))
Imre Deakf8b79e52014-03-04 19:23:07 +02003222 valleyview_display_irqs_install(dev_priv);
3223}
3224
3225void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3226{
3227 assert_spin_locked(&dev_priv->irq_lock);
3228
3229 if (!dev_priv->display_irqs_enabled)
3230 return;
3231
3232 dev_priv->display_irqs_enabled = false;
3233
Imre Deak950eaba2014-09-08 15:21:09 +03003234 if (intel_irqs_enabled(dev_priv))
Imre Deakf8b79e52014-03-04 19:23:07 +02003235 valleyview_display_irqs_uninstall(dev_priv);
3236}
3237
Ville Syrjälä0e6c9a92014-10-30 19:43:00 +02003238static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003239{
Imre Deakf8b79e52014-03-04 19:23:07 +02003240 dev_priv->irq_mask = ~0;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003241
Daniel Vetter20afbda2012-12-11 14:05:07 +01003242 I915_WRITE(PORT_HOTPLUG_EN, 0);
3243 POSTING_READ(PORT_HOTPLUG_EN);
3244
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003245 I915_WRITE(VLV_IIR, 0xffffffff);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003246 I915_WRITE(VLV_IIR, 0xffffffff);
3247 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3248 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3249 POSTING_READ(VLV_IMR);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003250
Daniel Vetterb79480b2013-06-27 17:52:10 +02003251	/* Interrupt setup is already guaranteed to be single-threaded; this is
3252 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003253 spin_lock_irq(&dev_priv->irq_lock);
Imre Deakf8b79e52014-03-04 19:23:07 +02003254 if (dev_priv->display_irqs_enabled)
3255 valleyview_display_irqs_install(dev_priv);
Daniel Vetterd6207432014-09-15 14:55:27 +02003256 spin_unlock_irq(&dev_priv->irq_lock);
Ville Syrjälä0e6c9a92014-10-30 19:43:00 +02003257}
3258
3259static int valleyview_irq_postinstall(struct drm_device *dev)
3260{
3261 struct drm_i915_private *dev_priv = dev->dev_private;
3262
3263 vlv_display_irq_postinstall(dev_priv);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003264
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003265 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003266
3267 /* ack & enable invalid PTE error interrupts */
3268#if 0 /* FIXME: add support to irq handler for checking these bits */
3269 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3270 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3271#endif
3272
3273 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003274
3275 return 0;
3276}
3277
Ben Widawskyabd58f02013-11-02 21:07:09 -07003278static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3279{
Ben Widawskyabd58f02013-11-02 21:07:09 -07003280 /* These are interrupts we'll toggle with the ring mask register */
3281 uint32_t gt_interrupts[] = {
3282 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003283 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
Ben Widawskyabd58f02013-11-02 21:07:09 -07003284 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003285 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3286 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
Ben Widawskyabd58f02013-11-02 21:07:09 -07003287 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003288 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3289 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3290 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
Ben Widawskyabd58f02013-11-02 21:07:09 -07003291 0,
Oscar Mateo73d477f2014-07-24 17:04:31 +01003292 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3293 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
Ben Widawskyabd58f02013-11-02 21:07:09 -07003294 };
3295
Ben Widawsky09610212014-05-15 20:58:08 +03003296 dev_priv->pm_irq_mask = 0xffffffff;
Deepak S9a2d2d82014-08-22 08:32:40 +05303297 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3298 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
Imre Deak78e68d32014-12-15 18:59:27 +02003299 /*
3300 * RPS interrupts will get enabled/disabled on demand when RPS itself
3301 * is enabled/disabled.
3302 */
3303 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
Deepak S9a2d2d82014-08-22 08:32:40 +05303304 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003305}
3306
3307static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3308{
Damien Lespiau770de832014-03-20 20:45:01 +00003309 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3310 uint32_t de_pipe_enables;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003311 int pipe;
Shashank Sharma9e637432014-08-22 17:40:43 +05303312 u32 de_port_en = GEN8_AUX_CHANNEL_A;
Damien Lespiau770de832014-03-20 20:45:01 +00003313
Jesse Barnes88e04702014-11-13 17:51:48 +00003314 if (IS_GEN9(dev_priv)) {
Damien Lespiau770de832014-03-20 20:45:01 +00003315 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3316 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
Shashank Sharma9e637432014-08-22 17:40:43 +05303317 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
Jesse Barnes88e04702014-11-13 17:51:48 +00003318 GEN9_AUX_CHANNEL_D;
Shashank Sharma9e637432014-08-22 17:40:43 +05303319
3320 if (IS_BROXTON(dev_priv))
3321 de_port_en |= BXT_DE_PORT_GMBUS;
Jesse Barnes88e04702014-11-13 17:51:48 +00003322 } else
Damien Lespiau770de832014-03-20 20:45:01 +00003323 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3324 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3325
3326 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3327 GEN8_PIPE_FIFO_UNDERRUN;
3328
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003329 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3330 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3331 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003332
Damien Lespiau055e3932014-08-18 13:49:10 +01003333 for_each_pipe(dev_priv, pipe)
Daniel Vetterf458ebb2014-09-30 10:56:39 +02003334 if (intel_display_power_is_enabled(dev_priv,
Paulo Zanoni813bde42014-07-04 11:50:29 -03003335 POWER_DOMAIN_PIPE(pipe)))
3336 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3337 dev_priv->de_irq_mask[pipe],
3338 de_pipe_enables);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003339
Shashank Sharma9e637432014-08-22 17:40:43 +05303340 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003341}
3342
3343static int gen8_irq_postinstall(struct drm_device *dev)
3344{
3345 struct drm_i915_private *dev_priv = dev->dev_private;
3346
Shashank Sharma266ea3d2014-08-22 17:40:42 +05303347 if (HAS_PCH_SPLIT(dev))
3348 ibx_irq_pre_postinstall(dev);
Paulo Zanoni622364b2014-04-01 15:37:22 -03003349
Ben Widawskyabd58f02013-11-02 21:07:09 -07003350 gen8_gt_irq_postinstall(dev_priv);
3351 gen8_de_irq_postinstall(dev_priv);
3352
Shashank Sharma266ea3d2014-08-22 17:40:42 +05303353 if (HAS_PCH_SPLIT(dev))
3354 ibx_irq_postinstall(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003355
3356 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3357 POSTING_READ(GEN8_MASTER_IRQ);
3358
3359 return 0;
3360}
3361
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003362static int cherryview_irq_postinstall(struct drm_device *dev)
3363{
3364 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003365
Ville Syrjäläc2b66792014-10-30 19:43:02 +02003366 vlv_display_irq_postinstall(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003367
3368 gen8_gt_irq_postinstall(dev_priv);
3369
3370 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3371 POSTING_READ(GEN8_MASTER_IRQ);
3372
3373 return 0;
3374}
3375
Ben Widawskyabd58f02013-11-02 21:07:09 -07003376static void gen8_irq_uninstall(struct drm_device *dev)
3377{
3378 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003379
3380 if (!dev_priv)
3381 return;
3382
Paulo Zanoni823f6b32014-04-01 15:37:26 -03003383 gen8_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003384}
3385
Ville Syrjälä8ea0be42014-10-30 19:42:59 +02003386static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3387{
3388	/* Interrupt setup is already guaranteed to be single-threaded; this is
3389 * just to make the assert_spin_locked check happy. */
3390 spin_lock_irq(&dev_priv->irq_lock);
3391 if (dev_priv->display_irqs_enabled)
3392 valleyview_display_irqs_uninstall(dev_priv);
3393 spin_unlock_irq(&dev_priv->irq_lock);
3394
3395 vlv_display_irq_reset(dev_priv);
3396
Imre Deakc352d1b2014-11-20 16:05:55 +02003397 dev_priv->irq_mask = ~0;
Ville Syrjälä8ea0be42014-10-30 19:42:59 +02003398}
3399
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003400static void valleyview_irq_uninstall(struct drm_device *dev)
3401{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003402 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003403
3404 if (!dev_priv)
3405 return;
3406
Imre Deak843d0e72014-04-14 20:24:23 +03003407 I915_WRITE(VLV_MASTER_IER, 0);
3408
Ville Syrjälä893fce82014-10-30 19:42:56 +02003409 gen5_gt_irq_reset(dev);
3410
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003411 I915_WRITE(HWSTAM, 0xffffffff);
Imre Deakf8b79e52014-03-04 19:23:07 +02003412
Ville Syrjälä8ea0be42014-10-30 19:42:59 +02003413 vlv_display_irq_uninstall(dev_priv);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003414}
3415
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003416static void cherryview_irq_uninstall(struct drm_device *dev)
3417{
3418 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003419
3420 if (!dev_priv)
3421 return;
3422
3423 I915_WRITE(GEN8_MASTER_IRQ, 0);
3424 POSTING_READ(GEN8_MASTER_IRQ);
3425
Ville Syrjäläa2c30fb2014-10-30 19:42:52 +02003426 gen8_gt_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003427
Ville Syrjäläa2c30fb2014-10-30 19:42:52 +02003428 GEN5_IRQ_RESET(GEN8_PCU_);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003429
Ville Syrjäläc2b66792014-10-30 19:43:02 +02003430 vlv_display_irq_uninstall(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003431}
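/*
 * Teardown on CHV mirrors bring-up in reverse (a descriptive sketch of the
 * function above, not additional behaviour): the master interrupt is gated
 * first so nothing new is delivered, then the GT, PCU and display sources
 * are reset. Roughly:
 *
 *	GEN8_MASTER_IRQ = 0 (+ posting read) -> GT reset -> PCU reset ->
 *	VLV display uninstall
 */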
3432
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003433static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003434{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003435 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07003436
3437 if (!dev_priv)
3438 return;
3439
Paulo Zanonibe30b292014-04-01 15:37:25 -03003440 ironlake_irq_reset(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003441}
3442
Chris Wilsonc2798b12012-04-22 21:13:57 +01003443static void i8xx_irq_preinstall(struct drm_device * dev)
3444{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003445 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003446 int pipe;
3447
Damien Lespiau055e3932014-08-18 13:49:10 +01003448 for_each_pipe(dev_priv, pipe)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003449 I915_WRITE(PIPESTAT(pipe), 0);
3450 I915_WRITE16(IMR, 0xffff);
3451 I915_WRITE16(IER, 0x0);
3452 POSTING_READ16(IER);
3453}
3454
3455static int i8xx_irq_postinstall(struct drm_device *dev)
3456{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003457 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003458
Chris Wilsonc2798b12012-04-22 21:13:57 +01003459 I915_WRITE16(EMR,
3460 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3461
3462 /* Unmask the interrupts that we always want on. */
3463 dev_priv->irq_mask =
3464 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3465 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3466 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
Daniel Vetter37ef01a2015-04-01 13:43:46 +02003467 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003468 I915_WRITE16(IMR, dev_priv->irq_mask);
3469
3470 I915_WRITE16(IER,
3471 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3472 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Chris Wilsonc2798b12012-04-22 21:13:57 +01003473 I915_USER_INTERRUPT);
3474 POSTING_READ16(IER);
3475
Daniel Vetter379ef822013-10-16 22:55:56 +02003476 /* Interrupt setup is already guaranteed to be single-threaded; this is
3477 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003478 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003479 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3480 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02003481 spin_unlock_irq(&dev_priv->irq_lock);
Daniel Vetter379ef822013-10-16 22:55:56 +02003482
Chris Wilsonc2798b12012-04-22 21:13:57 +01003483 return 0;
3484}
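/*
 * Pipe events on these parts are two-level, which is why the postinstall
 * above touches both IMR/IER and PIPESTAT: i915_enable_pipestat() sets an
 * enable bit in PIPESTAT(pipe), and any enabled event is then reported
 * through the single I915_DISPLAY_PIPE_*_EVENT_INTERRUPT bit unmasked in
 * IMR/IER. A rough sketch of the reporting path (illustrative only):
 *
 *	PIPESTAT(A) status bit asserts -> PIPE_A_EVENT bit in IIR -> handler
 *	reads and clears PIPESTAT(A), then services vblank/CRC/underrun
 */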
3485
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003486/*
3487 * Returns true when a page flip has completed.
3488 */
3489static bool i8xx_handle_vblank(struct drm_device *dev,
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003490 int plane, int pipe, u32 iir)
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003491{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003492 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003493 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003494
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03003495 if (!intel_pipe_handle_vblank(dev, pipe))
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003496 return false;
3497
3498 if ((iir & flip_pending) == 0)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003499 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003500
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003501 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3502 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3503 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3504 * the flip is completed (no longer pending). Since this doesn't raise
3505 * an interrupt per se, we watch for the change at vblank.
3506 */
3507 if (I915_READ16(ISR) & flip_pending)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003508 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003509
Ville Syrjälä7d475592014-12-17 23:08:03 +02003510 intel_prepare_page_flip(dev, plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003511 intel_finish_page_flip(dev, pipe);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003512 return true;
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003513
3514check_page_flip:
3515 intel_check_page_flip(dev, pipe);
3516 return false;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003517}
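/*
 * Put differently, the function above reports a completed flip exactly when
 * IIR still latches PendingFlip but ISR no longer shows it. A condensed
 * sketch of that decision (same semantics as the code, for illustration):
 *
 *	if ((iir & flip_pending) && !(I915_READ16(ISR) & flip_pending)) {
 *		intel_prepare_page_flip(dev, plane);
 *		intel_finish_page_flip(dev, pipe);
 *		return true;
 *	}
 *	intel_check_page_flip(dev, pipe);	// not done yet, re-check at vblank
 *	return false;
 */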
3518
Daniel Vetterff1f5252012-10-02 15:10:55 +02003519static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003520{
Daniel Vetter45a83f82014-05-12 19:17:55 +02003521 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003522 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003523 u16 iir, new_iir;
3524 u32 pipe_stats[2];
Chris Wilsonc2798b12012-04-22 21:13:57 +01003525 int pipe;
3526 u16 flip_mask =
3527 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3528 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3529
Imre Deak2dd2a882015-02-24 11:14:30 +02003530 if (!intel_irqs_enabled(dev_priv))
3531 return IRQ_NONE;
3532
Chris Wilsonc2798b12012-04-22 21:13:57 +01003533 iir = I915_READ16(IIR);
3534 if (iir == 0)
3535 return IRQ_NONE;
3536
3537 while (iir & ~flip_mask) {
3538 /* Can't rely on pipestat interrupt bit in iir as it might
3539 * have been cleared after the pipestat interrupt was received.
3540 * It doesn't set the bit in iir again, but it still produces
3541 * interrupts (for non-MSI).
3542 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02003543 spin_lock(&dev_priv->irq_lock);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003544 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Daniel Vetteraaecdf62014-11-04 15:52:22 +01003545 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003546
Damien Lespiau055e3932014-08-18 13:49:10 +01003547 for_each_pipe(dev_priv, pipe) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003548 int reg = PIPESTAT(pipe);
3549 pipe_stats[pipe] = I915_READ(reg);
3550
3551 /*
3552 * Clear the PIPE*STAT regs before the IIR
3553 */
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003554 if (pipe_stats[pipe] & 0x8000ffff)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003555 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003556 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02003557 spin_unlock(&dev_priv->irq_lock);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003558
3559 I915_WRITE16(IIR, iir & ~flip_mask);
3560 new_iir = I915_READ16(IIR); /* Flush posted writes */
3561
Chris Wilsonc2798b12012-04-22 21:13:57 +01003562 if (iir & I915_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01003563 notify_ring(&dev_priv->ring[RCS]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003564
Damien Lespiau055e3932014-08-18 13:49:10 +01003565 for_each_pipe(dev_priv, pipe) {
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003566 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003567 if (HAS_FBC(dev))
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003568 plane = !plane;
3569
Daniel Vetter4356d582013-10-16 22:55:55 +02003570 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003571 i8xx_handle_vblank(dev, plane, pipe, iir))
3572 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003573
Daniel Vetter4356d582013-10-16 22:55:55 +02003574 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003575 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003576
Daniel Vetter1f7247c2014-09-30 10:56:48 +02003577 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3578 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3579 pipe);
Daniel Vetter4356d582013-10-16 22:55:55 +02003580 }
Chris Wilsonc2798b12012-04-22 21:13:57 +01003581
3582 iir = new_iir;
3583 }
3584
3585 return IRQ_HANDLED;
3586}
3587
3588static void i8xx_irq_uninstall(struct drm_device * dev)
3589{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003590 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003591 int pipe;
3592
Damien Lespiau055e3932014-08-18 13:49:10 +01003593 for_each_pipe(dev_priv, pipe) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003594 /* Clear enable bits; then clear status bits */
3595 I915_WRITE(PIPESTAT(pipe), 0);
3596 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3597 }
3598 I915_WRITE16(IMR, 0xffff);
3599 I915_WRITE16(IER, 0x0);
3600 I915_WRITE16(IIR, I915_READ16(IIR));
3601}
3602
Chris Wilsona266c7d2012-04-24 22:59:44 +01003603static void i915_irq_preinstall(struct drm_device * dev)
3604{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003605 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003606 int pipe;
3607
Chris Wilsona266c7d2012-04-24 22:59:44 +01003608 if (I915_HAS_HOTPLUG(dev)) {
3609 I915_WRITE(PORT_HOTPLUG_EN, 0);
3610 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3611 }
3612
Chris Wilson00d98eb2012-04-24 22:59:48 +01003613 I915_WRITE16(HWSTAM, 0xeffe);
Damien Lespiau055e3932014-08-18 13:49:10 +01003614 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003615 I915_WRITE(PIPESTAT(pipe), 0);
3616 I915_WRITE(IMR, 0xffffffff);
3617 I915_WRITE(IER, 0x0);
3618 POSTING_READ(IER);
3619}
3620
3621static int i915_irq_postinstall(struct drm_device *dev)
3622{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003623 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01003624 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003625
Chris Wilson38bde182012-04-24 22:59:50 +01003626 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3627
3628 /* Unmask the interrupts that we always want on. */
3629 dev_priv->irq_mask =
3630 ~(I915_ASLE_INTERRUPT |
3631 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3632 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3633 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
Daniel Vetter37ef01a2015-04-01 13:43:46 +02003634 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilson38bde182012-04-24 22:59:50 +01003635
3636 enable_mask =
3637 I915_ASLE_INTERRUPT |
3638 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3639 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Chris Wilson38bde182012-04-24 22:59:50 +01003640 I915_USER_INTERRUPT;
3641
Chris Wilsona266c7d2012-04-24 22:59:44 +01003642 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01003643 I915_WRITE(PORT_HOTPLUG_EN, 0);
3644 POSTING_READ(PORT_HOTPLUG_EN);
3645
Chris Wilsona266c7d2012-04-24 22:59:44 +01003646 /* Enable in IER... */
3647 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3648 /* and unmask in IMR */
3649 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3650 }
3651
Chris Wilsona266c7d2012-04-24 22:59:44 +01003652 I915_WRITE(IMR, dev_priv->irq_mask);
3653 I915_WRITE(IER, enable_mask);
3654 POSTING_READ(IER);
3655
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003656 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003657
Daniel Vetter379ef822013-10-16 22:55:56 +02003658 /* Interrupt setup is already guaranteed to be single-threaded; this is
3659 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003660 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003661 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3662 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02003663 spin_unlock_irq(&dev_priv->irq_lock);
Daniel Vetter379ef822013-10-16 22:55:56 +02003664
Daniel Vetter20afbda2012-12-11 14:05:07 +01003665 return 0;
3666}
3667
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003668/*
3669 * Returns true when a page flip has completed.
3670 */
3671static bool i915_handle_vblank(struct drm_device *dev,
3672 int plane, int pipe, u32 iir)
3673{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003674 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003675 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3676
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03003677 if (!intel_pipe_handle_vblank(dev, pipe))
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003678 return false;
3679
3680 if ((iir & flip_pending) == 0)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003681 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003682
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003683 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3684 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3685 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3686 * the flip is completed (no longer pending). Since this doesn't raise
3687 * an interrupt per se, we watch for the change at vblank.
3688 */
3689 if (I915_READ(ISR) & flip_pending)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003690 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003691
Ville Syrjälä7d475592014-12-17 23:08:03 +02003692 intel_prepare_page_flip(dev, plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003693 intel_finish_page_flip(dev, pipe);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003694 return true;
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003695
3696check_page_flip:
3697 intel_check_page_flip(dev, pipe);
3698 return false;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003699}
3700
Daniel Vetterff1f5252012-10-02 15:10:55 +02003701static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003702{
Daniel Vetter45a83f82014-05-12 19:17:55 +02003703 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003704 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01003705 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilson38bde182012-04-24 22:59:50 +01003706 u32 flip_mask =
3707 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3708 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01003709 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003710
Imre Deak2dd2a882015-02-24 11:14:30 +02003711 if (!intel_irqs_enabled(dev_priv))
3712 return IRQ_NONE;
3713
Chris Wilsona266c7d2012-04-24 22:59:44 +01003714 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01003715 do {
3716 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01003717 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003718
3719 /* Can't rely on pipestat interrupt bit in iir as it might
3720 * have been cleared after the pipestat interrupt was received.
3721 * It doesn't set the bit in iir again, but it still produces
3722 * interrupts (for non-MSI).
3723 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02003724 spin_lock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003725 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Daniel Vetteraaecdf62014-11-04 15:52:22 +01003726 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003727
Damien Lespiau055e3932014-08-18 13:49:10 +01003728 for_each_pipe(dev_priv, pipe) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003729 int reg = PIPESTAT(pipe);
3730 pipe_stats[pipe] = I915_READ(reg);
3731
Chris Wilson38bde182012-04-24 22:59:50 +01003732 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003733 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003734 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01003735 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003736 }
3737 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02003738 spin_unlock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003739
3740 if (!irq_received)
3741 break;
3742
Chris Wilsona266c7d2012-04-24 22:59:44 +01003743 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä16c6c562014-04-01 10:54:36 +03003744 if (I915_HAS_HOTPLUG(dev) &&
3745 iir & I915_DISPLAY_PORT_INTERRUPT)
3746 i9xx_hpd_irq_handler(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003747
Chris Wilson38bde182012-04-24 22:59:50 +01003748 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003749 new_iir = I915_READ(IIR); /* Flush posted writes */
3750
Chris Wilsona266c7d2012-04-24 22:59:44 +01003751 if (iir & I915_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01003752 notify_ring(&dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003753
Damien Lespiau055e3932014-08-18 13:49:10 +01003754 for_each_pipe(dev_priv, pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01003755 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003756 if (HAS_FBC(dev))
Chris Wilson38bde182012-04-24 22:59:50 +01003757 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02003758
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003759 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3760 i915_handle_vblank(dev, plane, pipe, iir))
3761 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003762
3763 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3764 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003765
3766 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003767 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003768
Daniel Vetter1f7247c2014-09-30 10:56:48 +02003769 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3770 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3771 pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003772 }
3773
Chris Wilsona266c7d2012-04-24 22:59:44 +01003774 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3775 intel_opregion_asle_intr(dev);
3776
3777 /* With MSI, interrupts are only generated when iir
3778 * transitions from zero to nonzero. If another bit got
3779 * set while we were handling the existing iir bits, then
3780 * we would never get another interrupt.
3781 *
3782 * This is fine on non-MSI as well, as if we hit this path
3783 * we avoid exiting the interrupt handler only to generate
3784 * another one.
3785 *
3786 * Note that for MSI this could cause a stray interrupt report
3787 * if an interrupt landed in the time between writing IIR and
3788 * the posting read. This should be rare enough to never
3789 * trigger the 99% of 100,000 interrupts test for disabling
3790 * stray interrupts.
3791 */
Chris Wilson38bde182012-04-24 22:59:50 +01003792 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003793 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003794 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003795
3796 return ret;
3797}
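/*
 * The IIR re-read loop above is what keeps MSI delivery working: MSI only
 * fires on a 0 -> 1 transition of IIR, so the handler keeps acknowledging
 * and re-reading until IIR (ignoring the flip bits) really is zero. A
 * condensed sketch of one iteration (illustrative only):
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		read and clear PIPESTAT under irq_lock;
 *		I915_WRITE(IIR, iir & ~flip_mask);
 *		new_iir = I915_READ(IIR);	// flush, pick up new events
 *		handle the events recorded in iir;
 *		iir = new_iir;
 *	} while (iir & ~flip_mask);
 */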
3798
3799static void i915_irq_uninstall(struct drm_device * dev)
3800{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003801 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003802 int pipe;
3803
Chris Wilsona266c7d2012-04-24 22:59:44 +01003804 if (I915_HAS_HOTPLUG(dev)) {
3805 I915_WRITE(PORT_HOTPLUG_EN, 0);
3806 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3807 }
3808
Chris Wilson00d98eb2012-04-24 22:59:48 +01003809 I915_WRITE16(HWSTAM, 0xffff);
Damien Lespiau055e3932014-08-18 13:49:10 +01003810 for_each_pipe(dev_priv, pipe) {
Chris Wilson55b39752012-04-24 22:59:49 +01003811 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003812 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003813 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3814 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003815 I915_WRITE(IMR, 0xffffffff);
3816 I915_WRITE(IER, 0x0);
3817
Chris Wilsona266c7d2012-04-24 22:59:44 +01003818 I915_WRITE(IIR, I915_READ(IIR));
3819}
3820
3821static void i965_irq_preinstall(struct drm_device * dev)
3822{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003823 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003824 int pipe;
3825
Chris Wilsonadca4732012-05-11 18:01:31 +01003826 I915_WRITE(PORT_HOTPLUG_EN, 0);
3827 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003828
3829 I915_WRITE(HWSTAM, 0xeffe);
Damien Lespiau055e3932014-08-18 13:49:10 +01003830 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003831 I915_WRITE(PIPESTAT(pipe), 0);
3832 I915_WRITE(IMR, 0xffffffff);
3833 I915_WRITE(IER, 0x0);
3834 POSTING_READ(IER);
3835}
3836
3837static int i965_irq_postinstall(struct drm_device *dev)
3838{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003839 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003840 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003841 u32 error_mask;
3842
Chris Wilsona266c7d2012-04-24 22:59:44 +01003843 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003844 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01003845 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003846 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3847 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3848 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3849 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3850 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3851
3852 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003853 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3854 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003855 enable_mask |= I915_USER_INTERRUPT;
3856
3857 if (IS_G4X(dev))
3858 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003859
Daniel Vetterb79480b2013-06-27 17:52:10 +02003860 /* Interrupt setup is already guaranteed to be single-threaded; this is
3861 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003862 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003863 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3864 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3865 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02003866 spin_unlock_irq(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003867
Chris Wilsona266c7d2012-04-24 22:59:44 +01003868 /*
3869 * Enable some error detection, note the instruction error mask
3870 * bit is reserved, so we leave it masked.
3871 */
3872 if (IS_G4X(dev)) {
3873 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3874 GM45_ERROR_MEM_PRIV |
3875 GM45_ERROR_CP_PRIV |
3876 I915_ERROR_MEMORY_REFRESH);
3877 } else {
3878 error_mask = ~(I915_ERROR_PAGE_TABLE |
3879 I915_ERROR_MEMORY_REFRESH);
3880 }
3881 I915_WRITE(EMR, error_mask);
3882
3883 I915_WRITE(IMR, dev_priv->irq_mask);
3884 I915_WRITE(IER, enable_mask);
3885 POSTING_READ(IER);
3886
Daniel Vetter20afbda2012-12-11 14:05:07 +01003887 I915_WRITE(PORT_HOTPLUG_EN, 0);
3888 POSTING_READ(PORT_HOTPLUG_EN);
3889
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003890 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003891
3892 return 0;
3893}
3894
Egbert Eichbac56d52013-02-25 12:06:51 -05003895static void i915_hpd_irq_setup(struct drm_device *dev)
Daniel Vetter20afbda2012-12-11 14:05:07 +01003896{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003897 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eichcd569ae2013-04-16 13:36:57 +02003898 struct intel_encoder *intel_encoder;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003899 u32 hotplug_en;
3900
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003901 assert_spin_locked(&dev_priv->irq_lock);
3902
Ville Syrjälä778eb332015-01-09 14:21:13 +02003903 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3904 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3905 /* Note HDMI and DP share hotplug bits */
3906 /* enable bits are the same for all generations */
3907 for_each_intel_encoder(dev, intel_encoder)
Jani Nikula5fcece82015-05-27 15:03:42 +03003908 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
Ville Syrjälä778eb332015-01-09 14:21:13 +02003909 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3910 /* Programming the CRT detection parameters tends
3911 to generate a spurious hotplug event about three
3912 seconds later. So just do it once.
3913 */
3914 if (IS_G4X(dev))
3915 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3916 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3917 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003918
Ville Syrjälä778eb332015-01-09 14:21:13 +02003919 /* Ignore TV since it's buggy */
3920 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003921}
3922
Daniel Vetterff1f5252012-10-02 15:10:55 +02003923static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003924{
Daniel Vetter45a83f82014-05-12 19:17:55 +02003925 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003926 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003927 u32 iir, new_iir;
3928 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003929 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003930 u32 flip_mask =
3931 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3932 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003933
Imre Deak2dd2a882015-02-24 11:14:30 +02003934 if (!intel_irqs_enabled(dev_priv))
3935 return IRQ_NONE;
3936
Chris Wilsona266c7d2012-04-24 22:59:44 +01003937 iir = I915_READ(IIR);
3938
Chris Wilsona266c7d2012-04-24 22:59:44 +01003939 for (;;) {
Ville Syrjälä501e01d2014-01-17 11:35:15 +02003940 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson2c8ba292012-04-24 22:59:46 +01003941 bool blc_event = false;
3942
Chris Wilsona266c7d2012-04-24 22:59:44 +01003943 /* Can't rely on pipestat interrupt bit in iir as it might
3944 * have been cleared after the pipestat interrupt was received.
3945 * It doesn't set the bit in iir again, but it still produces
3946 * interrupts (for non-MSI).
3947 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02003948 spin_lock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003949 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Daniel Vetteraaecdf62014-11-04 15:52:22 +01003950 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003951
Damien Lespiau055e3932014-08-18 13:49:10 +01003952 for_each_pipe(dev_priv, pipe) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003953 int reg = PIPESTAT(pipe);
3954 pipe_stats[pipe] = I915_READ(reg);
3955
3956 /*
3957 * Clear the PIPE*STAT regs before the IIR
3958 */
3959 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003960 I915_WRITE(reg, pipe_stats[pipe]);
Ville Syrjälä501e01d2014-01-17 11:35:15 +02003961 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003962 }
3963 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02003964 spin_unlock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003965
3966 if (!irq_received)
3967 break;
3968
3969 ret = IRQ_HANDLED;
3970
3971 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä16c6c562014-04-01 10:54:36 +03003972 if (iir & I915_DISPLAY_PORT_INTERRUPT)
3973 i9xx_hpd_irq_handler(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003974
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003975 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003976 new_iir = I915_READ(IIR); /* Flush posted writes */
3977
Chris Wilsona266c7d2012-04-24 22:59:44 +01003978 if (iir & I915_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01003979 notify_ring(&dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003980 if (iir & I915_BSD_USER_INTERRUPT)
Chris Wilson74cdb332015-04-07 16:21:05 +01003981 notify_ring(&dev_priv->ring[VCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003982
Damien Lespiau055e3932014-08-18 13:49:10 +01003983 for_each_pipe(dev_priv, pipe) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01003984 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003985 i915_handle_vblank(dev, pipe, pipe, iir))
3986 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003987
3988 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3989 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003990
3991 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003992 i9xx_pipe_crc_irq_handler(dev, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003993
Daniel Vetter1f7247c2014-09-30 10:56:48 +02003994 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3995 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003996 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003997
3998 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3999 intel_opregion_asle_intr(dev);
4000
Daniel Vetter515ac2b2012-12-01 13:53:44 +01004001 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4002 gmbus_irq_handler(dev);
4003
Chris Wilsona266c7d2012-04-24 22:59:44 +01004004 /* With MSI, interrupts are only generated when iir
4005 * transitions from zero to nonzero. If another bit got
4006 * set while we were handling the existing iir bits, then
4007 * we would never get another interrupt.
4008 *
4009 * This is fine on non-MSI as well, as if we hit this path
4010 * we avoid exiting the interrupt handler only to generate
4011 * another one.
4012 *
4013 * Note that for MSI this could cause a stray interrupt report
4014 * if an interrupt landed in the time between writing IIR and
4015 * the posting read. This should be rare enough to never
4016 * trigger the 99% of 100,000 interrupts test for disabling
4017 * stray interrupts.
4018 */
4019 iir = new_iir;
4020 }
4021
4022 return ret;
4023}
4024
4025static void i965_irq_uninstall(struct drm_device * dev)
4026{
Jani Nikula2d1013d2014-03-31 14:27:17 +03004027 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004028 int pipe;
4029
4030 if (!dev_priv)
4031 return;
4032
Chris Wilsonadca4732012-05-11 18:01:31 +01004033 I915_WRITE(PORT_HOTPLUG_EN, 0);
4034 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01004035
4036 I915_WRITE(HWSTAM, 0xffffffff);
Damien Lespiau055e3932014-08-18 13:49:10 +01004037 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004038 I915_WRITE(PIPESTAT(pipe), 0);
4039 I915_WRITE(IMR, 0xffffffff);
4040 I915_WRITE(IER, 0x0);
4041
Damien Lespiau055e3932014-08-18 13:49:10 +01004042 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004043 I915_WRITE(PIPESTAT(pipe),
4044 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4045 I915_WRITE(IIR, I915_READ(IIR));
4046}
4047
Daniel Vetterfca52a52014-09-30 10:56:45 +02004048/**
4049 * intel_irq_init - initializes irq support
4050 * @dev_priv: i915 device instance
4051 *
4052 * This function initializes all the irq support including work items, timers
4053 * and all the vtables. It does not set up the interrupt itself though.
4054 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004055void intel_irq_init(struct drm_i915_private *dev_priv)
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004056{
Daniel Vetterb9632912014-09-30 10:56:44 +02004057 struct drm_device *dev = dev_priv->dev;
Chris Wilson8b2e3262012-04-24 22:59:41 +01004058
Jani Nikula77913b32015-06-18 13:06:16 +03004059 intel_hpd_init_work(dev_priv);
4060
Daniel Vetterc6a828d2012-08-08 23:35:35 +02004061 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004062 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01004063
Deepak Sa6706b42014-03-15 20:23:22 +05304064 /* Let's track the enabled rps events */
Daniel Vetterb9632912014-09-30 10:56:44 +02004065 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
Ville Syrjälä6c65a582014-08-29 14:14:07 +03004066 /* WaGsvRC0ResidencyMethod:vlv */
Chris Wilson6f4b12f82015-03-18 09:48:23 +00004067 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
Deepak S31685c22014-07-03 17:33:01 -04004068 else
4069 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
Deepak Sa6706b42014-03-15 20:23:22 +05304070
Chris Wilson737b1502015-01-26 18:03:03 +02004071 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4072 i915_hangcheck_elapsed);
Daniel Vetter61bac782012-12-01 21:03:21 +01004073
Tomas Janousek97a19a22012-12-08 13:48:13 +01004074 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01004075
Daniel Vetterb9632912014-09-30 10:56:44 +02004076 if (IS_GEN2(dev_priv)) {
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +03004077 dev->max_vblank_count = 0;
4078 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
Daniel Vetterb9632912014-09-30 10:56:44 +02004079 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004080 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4081 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
Ville Syrjälä391f75e2013-09-25 19:55:26 +03004082 } else {
4083 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4084 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004085 }
4086
Ville Syrjälä21da2702014-08-06 14:49:55 +03004087 /*
4088 * Opt out of the vblank disable timer on everything except gen2.
4089 * Gen2 doesn't have a hardware frame counter and so depends on
4090 * vblank interrupts to produce sane vblank seuquence numbers.
4091 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004092 if (!IS_GEN2(dev_priv))
Ville Syrjälä21da2702014-08-06 14:49:55 +03004093 dev->vblank_disable_immediate = true;
4094
Daniel Vetterf3a5c3f2015-02-13 21:03:44 +01004095 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4096 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004097
Daniel Vetterb9632912014-09-30 10:56:44 +02004098 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä43f328d2014-04-09 20:40:52 +03004099 dev->driver->irq_handler = cherryview_irq_handler;
4100 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4101 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4102 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4103 dev->driver->enable_vblank = valleyview_enable_vblank;
4104 dev->driver->disable_vblank = valleyview_disable_vblank;
4105 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetterb9632912014-09-30 10:56:44 +02004106 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07004107 dev->driver->irq_handler = valleyview_irq_handler;
4108 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4109 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4110 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4111 dev->driver->enable_vblank = valleyview_enable_vblank;
4112 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05004113 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetterb9632912014-09-30 10:56:44 +02004114 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07004115 dev->driver->irq_handler = gen8_irq_handler;
Daniel Vetter723761b2014-05-22 17:56:34 +02004116 dev->driver->irq_preinstall = gen8_irq_reset;
Ben Widawskyabd58f02013-11-02 21:07:09 -07004117 dev->driver->irq_postinstall = gen8_irq_postinstall;
4118 dev->driver->irq_uninstall = gen8_irq_uninstall;
4119 dev->driver->enable_vblank = gen8_enable_vblank;
4120 dev->driver->disable_vblank = gen8_disable_vblank;
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02004121 if (HAS_PCH_SPLIT(dev))
4122 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4123 else
4124 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004125 } else if (HAS_PCH_SPLIT(dev)) {
4126 dev->driver->irq_handler = ironlake_irq_handler;
Daniel Vetter723761b2014-05-22 17:56:34 +02004127 dev->driver->irq_preinstall = ironlake_irq_reset;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004128 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4129 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4130 dev->driver->enable_vblank = ironlake_enable_vblank;
4131 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01004132 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004133 } else {
Daniel Vetterb9632912014-09-30 10:56:44 +02004134 if (INTEL_INFO(dev_priv)->gen == 2) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01004135 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4136 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4137 dev->driver->irq_handler = i8xx_irq_handler;
4138 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Daniel Vetterb9632912014-09-30 10:56:44 +02004139 } else if (INTEL_INFO(dev_priv)->gen == 3) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004140 dev->driver->irq_preinstall = i915_irq_preinstall;
4141 dev->driver->irq_postinstall = i915_irq_postinstall;
4142 dev->driver->irq_uninstall = i915_irq_uninstall;
4143 dev->driver->irq_handler = i915_irq_handler;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004144 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004145 dev->driver->irq_preinstall = i965_irq_preinstall;
4146 dev->driver->irq_postinstall = i965_irq_postinstall;
4147 dev->driver->irq_uninstall = i965_irq_uninstall;
4148 dev->driver->irq_handler = i965_irq_handler;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004149 }
Ville Syrjälä778eb332015-01-09 14:21:13 +02004150 if (I915_HAS_HOTPLUG(dev_priv))
4151 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004152 dev->driver->enable_vblank = i915_enable_vblank;
4153 dev->driver->disable_vblank = i915_disable_vblank;
4154 }
4155}
Daniel Vetter20afbda2012-12-11 14:05:07 +01004156
Daniel Vetterfca52a52014-09-30 10:56:45 +02004157/**
Daniel Vetterfca52a52014-09-30 10:56:45 +02004158 * intel_irq_install - enables the hardware interrupt
4159 * @dev_priv: i915 device instance
4160 *
4161 * This function enables the hardware interrupt handling, but leaves the hotplug
4162 * handling still disabled. It is called after intel_irq_init().
4163 *
4164 * In the driver load and resume code we need working interrupts in a few places
4165 * but don't want to deal with the hassle of concurrent probe and hotplug
4166 * workers. Hence the split into this two-stage approach.
4167 */
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004168int intel_irq_install(struct drm_i915_private *dev_priv)
4169{
4170 /*
4171 * We enable some interrupt sources in our postinstall hooks, so mark
4172 * interrupts as enabled _before_ actually enabling them to avoid
4173 * special cases in our ordering checks.
4174 */
4175 dev_priv->pm.irqs_enabled = true;
4176
4177 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4178}
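/*
 * A minimal sketch of the two-stage bring-up described above (the call
 * sites are assumed here for illustration; the real callers in the load
 * and resume paths add error handling):
 *
 *	intel_irq_init(dev_priv);		// vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);	// request the IRQ, run postinstall
 *	if (ret)
 *		goto cleanup;
 *	intel_hpd_init(dev_priv);		// hotplug is enabled separately, later
 */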
4179
Daniel Vetterfca52a52014-09-30 10:56:45 +02004180/**
4181 * intel_irq_uninstall - finalizes all irq handling
4182 * @dev_priv: i915 device instance
4183 *
4184 * This stops interrupt and hotplug handling and unregisters and frees all
4185 * resources acquired in the init functions.
4186 */
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004187void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4188{
4189 drm_irq_uninstall(dev_priv->dev);
4190 intel_hpd_cancel_work(dev_priv);
4191 dev_priv->pm.irqs_enabled = false;
4192}
4193
Daniel Vetterfca52a52014-09-30 10:56:45 +02004194/**
4195 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4196 * @dev_priv: i915 device instance
4197 *
4198 * This function is used to disable interrupts at runtime, both in the runtime
4199 * pm and the system suspend/resume code.
4200 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004201void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004202{
Daniel Vetterb9632912014-09-30 10:56:44 +02004203 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004204 dev_priv->pm.irqs_enabled = false;
Imre Deak2dd2a882015-02-24 11:14:30 +02004205 synchronize_irq(dev_priv->dev->irq);
Paulo Zanonic67a4702013-08-19 13:18:09 -03004206}
4207
Daniel Vetterfca52a52014-09-30 10:56:45 +02004208/**
4209 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4210 * @dev_priv: i915 device instance
4211 *
4212 * This function is used to enable interrupts at runtime, both in the runtime
4213 * pm and the system suspend/resume code.
4214 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004215void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004216{
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004217 dev_priv->pm.irqs_enabled = true;
Daniel Vetterb9632912014-09-30 10:56:44 +02004218 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4219 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
Paulo Zanonic67a4702013-08-19 13:18:09 -03004220}
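/*
 * A minimal sketch of how the two runtime-pm helpers above are expected to
 * be paired (assumed call sites, shown for illustration only):
 *
 *	suspend / runtime suspend:
 *		intel_runtime_pm_disable_interrupts(dev_priv);
 *		... power down, save state ...
 *
 *	resume / runtime resume:
 *		... restore state, power up ...
 *		intel_runtime_pm_enable_interrupts(dev_priv);
 */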