/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
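
/*
 * Illustrative expansion (a sketch, not additional driver code):
 * GEN5_IRQ_RESET(DE) above expands to roughly the sequence below. IIR is
 * acked twice, with posting reads in between, because a second queued
 * event only becomes visible in IIR once the first one has been cleared:
 *
 *	I915_WRITE(DEIMR, 0xffffffff);	 (mask all interrupt sources)
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);		 (disable all interrupts in IER)
 *	I915_WRITE(DEIIR, 0xffffffff);	 (ack a first queued event)
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);	 (ack a possible second event)
 *	POSTING_READ(DEIIR);
 */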

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
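
/*
 * Worked example of the interrupt_mask/enabled_irq_mask convention used
 * by the update helpers above (illustrative values only): interrupt_mask
 * selects which IMR bits to touch, enabled_irq_mask says which of those
 * end up unmasked. A set IMR bit masks (disables) the source. With
 * interrupt_mask = 0x6 and enabled_irq_mask = 0x2:
 *
 *	gt_irq_mask &= ~0x6;		(bits 1 and 2 cleared, i.e. unmasked)
 *	gt_irq_mask |= (~0x2 & 0x6);	(bit 2 set again, so only bit 1
 *					 remains enabled)
 */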

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
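
/*
 * Layout sketch for PIPESTAT (as the "status_mask << 16" default above
 * implies): status bits live in the low 16 bits of the register and each
 * enable bit sits 16 bits above its status bit, so a hypothetical status
 * bit (1 << 2) is enabled via (1 << 18). The VLV sprite flip-done and
 * PSR bits handled explicitly above are the exceptions to this pairing.
 */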

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
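
/*
 * Worked example for the vblank counter cook-up above (made-up mode):
 * with htotal = 100 and an adjusted vbl_start of 400 pixels, a pixel
 * counter of 399 returns the hardware frame count unchanged, while a
 * pixel counter of 400 (the adjusted start-of-vblank point) adds one.
 * The cooked counter therefore already reflects the new frame throughout
 * vblank, even though the hardware frame counter only increments at the
 * start of active.
 */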

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
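
/*
 * Sign convention sketch for the position returned above (made-up
 * numbers): with vbl_start = 100, vbl_end = 105 and vtotal = 105, a raw
 * position of 102 (inside vblank) becomes 102 - 105 = -3, counting up to
 * 0 at vbl_end, while a raw position of 10 (in active) becomes
 * 10 + (105 - 105) = 10, counting up from 0 since vbl_end.
 */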

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true, fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
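
/*
 * Note on the inverted scale used above: on Ironlake a smaller ips delay
 * value means a higher frequency, so ips.max_delay is numerically the
 * lower bound. E.g. (made-up values) with cur_delay = 5 and
 * max_delay = 3, a busy_up event yields new_delay = 4, and the clamp
 * only kicks in once new_delay would drop below 3.
 */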

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into a common unit of milliseconds */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate the overall C0 residency percentage
	 * only if the elapsed time is non-zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}
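
/*
 * Worked example for the residency math above (illustrative numbers):
 * if the busier of the render/media C0 counters advanced by 8 ms worth
 * of clocks while 10 ms of wall time elapsed, residency is
 * (8 * 100) / 10 = 80, i.e. the GPU spent roughly 80% of the evaluation
 * interval in the C0 (active) state.
 */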

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than the down
	 * threshold for continuous EI intervals. So calculate the down EI
	 * counters once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means C0 residency is less than the down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
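
/*
 * Example of the adaptive step above (illustrative): consecutive
 * GEN6_PM_RP_UP_THRESHOLD interrupts grow last_adj as 1, 2, 4, 8, ...
 * (2, 4, 8, ... on CHV, which needs even encodings), so the frequency
 * ramps exponentially while the load persists, and the step resets to a
 * single increment whenever the direction changes.
 */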

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
1232
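/*
 * The irq handler below only masks the parity interrupt, records which
 * slice(s) fired and kicks the work item above; the heavy lifting (and
 * the re-enabling of the GT_PARITY_ERROR bits) happens in process
 * context in ivybridge_parity_work().
 */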
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001233static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
Ben Widawskye3689192012-05-25 16:56:22 -07001234{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001235 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskye3689192012-05-25 16:56:22 -07001236
Ben Widawsky040d2ba2013-09-19 11:01:40 -07001237 if (!HAS_L3_DPF(dev))
Ben Widawskye3689192012-05-25 16:56:22 -07001238 return;
1239
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001240 spin_lock(&dev_priv->irq_lock);
Daniel Vetter480c8032014-07-16 09:49:40 +02001241 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001242 spin_unlock(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001243
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001244 iir &= GT_PARITY_ERROR(dev);
1245 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1246 dev_priv->l3_parity.which_slice |= 1 << 1;
1247
1248 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1249 dev_priv->l3_parity.which_slice |= 1 << 0;
1250
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001251 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001252}
1253
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001254static void ilk_gt_irq_handler(struct drm_device *dev,
1255 struct drm_i915_private *dev_priv,
1256 u32 gt_iir)
1257{
1258 if (gt_iir &
1259 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1260 notify_ring(dev, &dev_priv->ring[RCS]);
1261 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1262 notify_ring(dev, &dev_priv->ring[VCS]);
1263}
1264
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001265static void snb_gt_irq_handler(struct drm_device *dev,
1266 struct drm_i915_private *dev_priv,
1267 u32 gt_iir)
1268{
Ben Widawskycc609d52013-05-28 19:22:29 -07001270 if (gt_iir &
1271 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001272 notify_ring(dev, &dev_priv->ring[RCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001273 if (gt_iir & GT_BSD_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001274 notify_ring(dev, &dev_priv->ring[VCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001275 if (gt_iir & GT_BLT_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001276 notify_ring(dev, &dev_priv->ring[BCS]);
1277
Ben Widawskycc609d52013-05-28 19:22:29 -07001278 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1279 GT_BSD_CS_ERROR_INTERRUPT |
1280 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
Mika Kuoppala58174462014-02-25 17:11:26 +02001281 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1282 gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001283 }
Ben Widawskye3689192012-05-25 16:56:22 -07001284
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001285 if (gt_iir & GT_PARITY_ERROR(dev))
1286 ivybridge_parity_error_irq_handler(dev, gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001287}
1288
Ben Widawsky09610212014-05-15 20:58:08 +03001289static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1290{
1291 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1292 return;
1293
1294 spin_lock(&dev_priv->irq_lock);
1295 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
Imre Deaka72fbc32014-11-05 20:48:31 +02001296 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
Ben Widawsky09610212014-05-15 20:58:08 +03001297 spin_unlock(&dev_priv->irq_lock);
1298
1299 queue_work(dev_priv->wq, &dev_priv->rps.work);
1300}
1301
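/*
 * Gen8 spreads the GT interrupts over four IIR banks: GEN8_GT_IIR(0)
 * carries the render and blitter engines, IIR(1) the two video decode
 * engines, IIR(2) the PM/RPS events and IIR(3) the video enhancement
 * engine. A bank is only read (and acked) when the corresponding
 * master control bit claims it has something pending.
 */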
Ben Widawskyabd58f02013-11-02 21:07:09 -07001302static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1303 struct drm_i915_private *dev_priv,
1304 u32 master_ctl)
1305{
Thomas Daniele981e7b2014-07-24 17:04:39 +01001306 struct intel_engine_cs *ring;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001307 u32 rcs, bcs, vcs;
1308 uint32_t tmp = 0;
1309 irqreturn_t ret = IRQ_NONE;
1310
1311 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1312 tmp = I915_READ(GEN8_GT_IIR(0));
1313 if (tmp) {
Oscar Mateo38cc46d2014-06-16 16:10:59 +01001314 I915_WRITE(GEN8_GT_IIR(0), tmp);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001315 ret = IRQ_HANDLED;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001316
Ben Widawskyabd58f02013-11-02 21:07:09 -07001317 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001318 ring = &dev_priv->ring[RCS];
Ben Widawskyabd58f02013-11-02 21:07:09 -07001319 if (rcs & GT_RENDER_USER_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001320 notify_ring(dev, ring);
1321 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1322 intel_execlists_handle_ctx_events(ring);
1323
1324 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1325 ring = &dev_priv->ring[BCS];
Ben Widawskyabd58f02013-11-02 21:07:09 -07001326 if (bcs & GT_RENDER_USER_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001327 notify_ring(dev, ring);
1328 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1329 intel_execlists_handle_ctx_events(ring);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001330 } else
1331 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1332 }
1333
Zhao Yakui85f9b5f2014-04-17 10:37:38 +08001334 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07001335 tmp = I915_READ(GEN8_GT_IIR(1));
1336 if (tmp) {
Oscar Mateo38cc46d2014-06-16 16:10:59 +01001337 I915_WRITE(GEN8_GT_IIR(1), tmp);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001338 ret = IRQ_HANDLED;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001339
Ben Widawskyabd58f02013-11-02 21:07:09 -07001340 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001341 ring = &dev_priv->ring[VCS];
Ben Widawskyabd58f02013-11-02 21:07:09 -07001342 if (vcs & GT_RENDER_USER_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001343 notify_ring(dev, ring);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001344 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001345 intel_execlists_handle_ctx_events(ring);
1346
Zhao Yakui85f9b5f2014-04-17 10:37:38 +08001347 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001348 ring = &dev_priv->ring[VCS2];
Zhao Yakui85f9b5f2014-04-17 10:37:38 +08001349 if (vcs & GT_RENDER_USER_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001350 notify_ring(dev, ring);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001351 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001352 intel_execlists_handle_ctx_events(ring);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001353 } else
1354 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1355 }
1356
Ben Widawsky09610212014-05-15 20:58:08 +03001357 if (master_ctl & GEN8_GT_PM_IRQ) {
1358 tmp = I915_READ(GEN8_GT_IIR(2));
1359 if (tmp & dev_priv->pm_rps_events) {
Ben Widawsky09610212014-05-15 20:58:08 +03001360 I915_WRITE(GEN8_GT_IIR(2),
1361 tmp & dev_priv->pm_rps_events);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01001362 ret = IRQ_HANDLED;
1363 gen8_rps_irq_handler(dev_priv, tmp);
Ben Widawsky09610212014-05-15 20:58:08 +03001364 } else
1365 DRM_ERROR("The master control interrupt lied (PM)!\n");
1366 }
1367
Ben Widawskyabd58f02013-11-02 21:07:09 -07001368 if (master_ctl & GEN8_GT_VECS_IRQ) {
1369 tmp = I915_READ(GEN8_GT_IIR(3));
1370 if (tmp) {
Oscar Mateo38cc46d2014-06-16 16:10:59 +01001371 I915_WRITE(GEN8_GT_IIR(3), tmp);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001372 ret = IRQ_HANDLED;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001373
Ben Widawskyabd58f02013-11-02 21:07:09 -07001374 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
Thomas Daniele981e7b2014-07-24 17:04:39 +01001375 ring = &dev_priv->ring[VECS];
Ben Widawskyabd58f02013-11-02 21:07:09 -07001376 if (vcs & GT_RENDER_USER_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001377 notify_ring(dev, ring);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001378 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
Thomas Daniele981e7b2014-07-24 17:04:39 +01001379 intel_execlists_handle_ctx_events(ring);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001380 } else
1381 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1382 }
1383
1384 return ret;
1385}
1386
Egbert Eichb543fb02013-04-16 13:36:54 +02001387#define HPD_STORM_DETECT_PERIOD 1000
1388#define HPD_STORM_THRESHOLD 5
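/*
 * Storm detection parameters: as accounted in intel_hpd_irq_handler()
 * below, a pin that raises more than HPD_STORM_THRESHOLD interrupts
 * within one HPD_STORM_DETECT_PERIOD (in milliseconds, compared via
 * jiffies) is marked HPD_MARK_DISABLED and hpd_irq_setup() is rerun to
 * mask it.
 */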
1389
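/*
 * The shift helpers below pick out the per-port long/short pulse status
 * bits: judging by their callers, shifting the hotplug register right by
 * the returned amount lines a port's bits up with
 * PORTB_HOTPLUG_LONG_DETECT. Ports A and E carry no hotplug pin here,
 * hence the -1.
 */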
Jani Nikula07c338c2014-10-02 11:16:32 +03001390static int pch_port_to_hotplug_shift(enum port port)
Dave Airlie13cf5502014-06-18 11:29:35 +10001391{
1392 switch (port) {
1393 case PORT_A:
1394 case PORT_E:
1395 default:
1396 return -1;
1397 case PORT_B:
1398 return 0;
1399 case PORT_C:
1400 return 8;
1401 case PORT_D:
1402 return 16;
1403 }
1404}
1405
Jani Nikula07c338c2014-10-02 11:16:32 +03001406static int i915_port_to_hotplug_shift(enum port port)
Dave Airlie13cf5502014-06-18 11:29:35 +10001407{
1408 switch (port) {
1409 case PORT_A:
1410 case PORT_E:
1411 default:
1412 return -1;
1413 case PORT_B:
1414 return 17;
1415 case PORT_C:
1416 return 19;
1417 case PORT_D:
1418 return 21;
1419 }
1420}
1421
1422static inline enum port get_port_from_pin(enum hpd_pin pin)
1423{
1424 switch (pin) {
1425 case HPD_PORT_B:
1426 return PORT_B;
1427 case HPD_PORT_C:
1428 return PORT_C;
1429 case HPD_PORT_D:
1430 return PORT_D;
1431 default:
1432 return PORT_A; /* no hpd */
1433 }
1434}
1435
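/*
 * The handler below makes two passes over the trigger bits: the first
 * classifies per-port digital events as long or short pulses and routes
 * them to the dig_port_work queue, the second does the per-pin storm
 * accounting and schedules the ordinary hotplug work. Both loops run
 * under irq_lock; the work items are only queued after the lock is
 * dropped, since the hotplug work can grab modeset locks.
 */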
Daniel Vetter10a504d2013-06-27 17:52:12 +02001436static inline void intel_hpd_irq_handler(struct drm_device *dev,
Daniel Vetter22062db2013-06-27 17:52:11 +02001437 u32 hotplug_trigger,
Dave Airlie13cf5502014-06-18 11:29:35 +10001438 u32 dig_hotplug_reg,
Daniel Vetter22062db2013-06-27 17:52:11 +02001439 const u32 *hpd)
Egbert Eichb543fb02013-04-16 13:36:54 +02001440{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001441 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eichb543fb02013-04-16 13:36:54 +02001442 int i;
Dave Airlie13cf5502014-06-18 11:29:35 +10001443 enum port port;
Daniel Vetter10a504d2013-06-27 17:52:12 +02001444 bool storm_detected = false;
Dave Airlie13cf5502014-06-18 11:29:35 +10001445 bool queue_dig = false, queue_hp = false;
1446 u32 dig_shift;
1447 u32 dig_port_mask = 0;
Egbert Eichb543fb02013-04-16 13:36:54 +02001448
Daniel Vetter91d131d2013-06-27 17:52:14 +02001449 if (!hotplug_trigger)
1450 return;
1451
Dave Airlie13cf5502014-06-18 11:29:35 +10001452 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1453 hotplug_trigger, dig_hotplug_reg);
Imre Deakcc9bd492014-01-16 19:56:54 +02001454
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001455 spin_lock(&dev_priv->irq_lock);
Egbert Eichb543fb02013-04-16 13:36:54 +02001456 for (i = 1; i < HPD_NUM_PINS; i++) {
Dave Airlie13cf5502014-06-18 11:29:35 +10001457 if (!(hpd[i] & hotplug_trigger))
1458 continue;
Egbert Eich821450c2013-04-16 13:36:55 +02001459
Dave Airlie13cf5502014-06-18 11:29:35 +10001460 port = get_port_from_pin(i);
1461 if (port && dev_priv->hpd_irq_port[port]) {
1462 bool long_hpd;
1463
Jani Nikula07c338c2014-10-02 11:16:32 +03001464 if (HAS_PCH_SPLIT(dev)) {
1465 dig_shift = pch_port_to_hotplug_shift(port);
Dave Airlie13cf5502014-06-18 11:29:35 +10001466 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
Jani Nikula07c338c2014-10-02 11:16:32 +03001467 } else {
1468 dig_shift = i915_port_to_hotplug_shift(port);
1469 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
Dave Airlie13cf5502014-06-18 11:29:35 +10001470 }
1471
Ville Syrjälä26fbb772014-08-11 18:37:37 +03001472 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1473 port_name(port),
1474 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10001475 /* for long HPD pulses we want the digital work queue to run,
1476 * but we still want HPD storm detection to function. */
1477 if (long_hpd) {
1478 dev_priv->long_hpd_port_mask |= (1 << port);
1479 dig_port_mask |= hpd[i];
1480 } else {
1481 /* for short HPD just trigger the digital queue */
1482 dev_priv->short_hpd_port_mask |= (1 << port);
1483 hotplug_trigger &= ~hpd[i];
1484 }
1485 queue_dig = true;
1486 }
1487 }
1488
1489 for (i = 1; i < HPD_NUM_PINS; i++) {
Daniel Vetter3ff04a162014-04-24 12:03:17 +02001490 if (hpd[i] & hotplug_trigger &&
1491 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1492 /*
1493 * On GMCH platforms the interrupt mask bits only
1494 * prevent irq generation, not the setting of the
1495 * hotplug bits itself. So only WARN about unexpected
1496 * interrupts on saner platforms.
1497 */
1498 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1499 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1500 hotplug_trigger, i, hpd[i]);
1501
1502 continue;
1503 }
Egbert Eichb8f102e2013-07-26 14:14:24 +02001504
Egbert Eichb543fb02013-04-16 13:36:54 +02001505 if (!(hpd[i] & hotplug_trigger) ||
1506 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1507 continue;
1508
Dave Airlie13cf5502014-06-18 11:29:35 +10001509 if (!(dig_port_mask & hpd[i])) {
1510 dev_priv->hpd_event_bits |= (1 << i);
1511 queue_hp = true;
1512 }
1513
Egbert Eichb543fb02013-04-16 13:36:54 +02001514 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1515 dev_priv->hpd_stats[i].hpd_last_jiffies
1516 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1517 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1518 dev_priv->hpd_stats[i].hpd_cnt = 0;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001519 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001520 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1521 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
Egbert Eich142e2392013-04-11 15:57:57 +02001522 dev_priv->hpd_event_bits &= ~(1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001523 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
Daniel Vetter10a504d2013-06-27 17:52:12 +02001524 storm_detected = true;
Egbert Eichb543fb02013-04-16 13:36:54 +02001525 } else {
1526 dev_priv->hpd_stats[i].hpd_cnt++;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001527 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1528 dev_priv->hpd_stats[i].hpd_cnt);
Egbert Eichb543fb02013-04-16 13:36:54 +02001529 }
1530 }
1531
Daniel Vetter10a504d2013-06-27 17:52:12 +02001532 if (storm_detected)
1533 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001534 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter5876fa02013-06-27 17:52:13 +02001535
Daniel Vetter645416f2013-09-02 16:22:25 +02001536 /*
1537 * Our hotplug handler can grab modeset locks (by calling down into the
1538 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1539 * queue, as otherwise the flush_work in the pageflip code will
1540 * deadlock.
1541 */
Dave Airlie13cf5502014-06-18 11:29:35 +10001542 if (queue_dig)
Dave Airlie0e32b392014-05-02 14:02:48 +10001543 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
Dave Airlie13cf5502014-06-18 11:29:35 +10001544 if (queue_hp)
1545 schedule_work(&dev_priv->hotplug_work);
Egbert Eichb543fb02013-04-16 13:36:54 +02001546}
1547
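/*
 * GMBUS and DP AUX transfers share dev_priv->gmbus_wait_queue: the two
 * wakeup handlers below simply wake every sleeper, and each waiter then
 * re-checks its own completion condition.
 */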
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001548static void gmbus_irq_handler(struct drm_device *dev)
1549{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001550 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter28c70f12012-12-01 13:53:45 +01001551
Daniel Vetter28c70f12012-12-01 13:53:45 +01001552 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001553}
1554
Daniel Vetterce99c252012-12-01 13:53:47 +01001555static void dp_aux_irq_handler(struct drm_device *dev)
1556{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001557 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001558
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001559 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetterce99c252012-12-01 13:53:47 +01001560}
1561
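/*
 * Pipe CRC capture is only compiled in together with debugfs: each
 * interrupt appends one entry to a CIRC_*-style ring buffer under
 * pipe_crc->lock and wakes readers blocked on pipe_crc->wq. Without
 * CONFIG_DEBUG_FS the handler below collapses into an empty stub.
 */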
Shuang He8bf1e9f2013-10-15 18:55:27 +01001562#if defined(CONFIG_DEBUG_FS)
Daniel Vetter277de952013-10-18 16:37:07 +02001563static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1564 uint32_t crc0, uint32_t crc1,
1565 uint32_t crc2, uint32_t crc3,
1566 uint32_t crc4)
Shuang He8bf1e9f2013-10-15 18:55:27 +01001567{
1568 struct drm_i915_private *dev_priv = dev->dev_private;
1569 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1570 struct intel_pipe_crc_entry *entry;
Damien Lespiauac2300d2013-10-15 18:55:30 +01001571 int head, tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001572
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001573 spin_lock(&pipe_crc->lock);
1574
Damien Lespiau0c912c72013-10-15 18:55:37 +01001575 if (!pipe_crc->entries) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001576 spin_unlock(&pipe_crc->lock);
Damien Lespiau0c912c72013-10-15 18:55:37 +01001577 DRM_ERROR("spurious interrupt\n");
1578 return;
1579 }
1580
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001581 head = pipe_crc->head;
1582 tail = pipe_crc->tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001583
1584 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001585 spin_unlock(&pipe_crc->lock);
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001586 DRM_ERROR("CRC buffer overflowing\n");
1587 return;
1588 }
1589
1590 entry = &pipe_crc->entries[head];
Shuang He8bf1e9f2013-10-15 18:55:27 +01001591
Daniel Vetter8bc5e952013-10-16 22:55:49 +02001592 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
Daniel Vettereba94eb2013-10-16 22:55:46 +02001593 entry->crc[0] = crc0;
1594 entry->crc[1] = crc1;
1595 entry->crc[2] = crc2;
1596 entry->crc[3] = crc3;
1597 entry->crc[4] = crc4;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001598
1599 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001600 pipe_crc->head = head;
1601
1602 spin_unlock(&pipe_crc->lock);
Damien Lespiau07144422013-10-15 18:55:40 +01001603
1604 wake_up_interruptible(&pipe_crc->wq);
Shuang He8bf1e9f2013-10-15 18:55:27 +01001605}
Daniel Vetter277de952013-10-18 16:37:07 +02001606#else
1607static inline void
1608display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1609 uint32_t crc0, uint32_t crc1,
1610 uint32_t crc2, uint32_t crc3,
1611 uint32_t crc4) {}
1612#endif
Daniel Vettereba94eb2013-10-16 22:55:46 +02001613
Daniel Vetter277de952013-10-18 16:37:07 +02001614
1615static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5a69b892013-10-16 22:55:52 +02001616{
1617 struct drm_i915_private *dev_priv = dev->dev_private;
1618
Daniel Vetter277de952013-10-18 16:37:07 +02001619 display_pipe_crc_irq_handler(dev, pipe,
1620 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1621 0, 0, 0, 0);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001622}
1623
Daniel Vetter277de952013-10-18 16:37:07 +02001624static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vettereba94eb2013-10-16 22:55:46 +02001625{
1626 struct drm_i915_private *dev_priv = dev->dev_private;
1627
Daniel Vetter277de952013-10-18 16:37:07 +02001628 display_pipe_crc_irq_handler(dev, pipe,
1629 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1630 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1631 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1632 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1633 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
Daniel Vettereba94eb2013-10-16 22:55:46 +02001634}
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001635
Daniel Vetter277de952013-10-18 16:37:07 +02001636static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001637{
1638 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001639 uint32_t res1, res2;
1640
1641 if (INTEL_INFO(dev)->gen >= 3)
1642 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1643 else
1644 res1 = 0;
1645
1646 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1647 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1648 else
1649 res2 = 0;
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001650
Daniel Vetter277de952013-10-18 16:37:07 +02001651 display_pipe_crc_irq_handler(dev, pipe,
1652 I915_READ(PIPE_CRC_RES_RED(pipe)),
1653 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1654 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1655 res1, res2);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001656}
Shuang He8bf1e9f2013-10-15 18:55:27 +01001657
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001658/* The RPS events need forcewake, so we add them to a work queue and mask their
1659 * IMR bits until the work is done. Other interrupts can be processed without
1660 * the work queue. */
1661static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
Ben Widawskybaf02a12013-05-28 19:22:24 -07001662{
Deepak Sa6706b42014-03-15 20:23:22 +05301663 if (pm_iir & dev_priv->pm_rps_events) {
Daniel Vetter59cdb632013-07-04 23:35:28 +02001664 spin_lock(&dev_priv->irq_lock);
Deepak Sa6706b42014-03-15 20:23:22 +05301665 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
Daniel Vetter480c8032014-07-16 09:49:40 +02001666 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
Daniel Vetter59cdb632013-07-04 23:35:28 +02001667 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter2adbee62013-07-04 23:35:27 +02001668
1669 queue_work(dev_priv->wq, &dev_priv->rps.work);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001670 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001671
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001672 if (HAS_VEBOX(dev_priv->dev)) {
1673 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1674 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
Ben Widawsky12638c52013-05-28 19:22:31 -07001675
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001676 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
Mika Kuoppala58174462014-02-25 17:11:26 +02001677 i915_handle_error(dev_priv->dev, false,
1678 "VEBOX CS error interrupt 0x%08x",
1679 pm_iir);
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001680 }
Ben Widawsky12638c52013-05-28 19:22:31 -07001681 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001682}
1683
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03001684static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1685{
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03001686 return drm_handle_vblank(dev, pipe);
1690}
1691
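/*
 * PIPESTAT handling below is split in two: the first loop, under
 * irq_lock, works out which status bits each pipe may validly signal,
 * latches them into pipe_stats[] and clears them in the hardware; the
 * second loop then acts on the latched copies with the lock dropped.
 */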
Imre Deakc1874ed2014-02-04 21:35:46 +02001692static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1693{
1694 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak91d181d2014-02-10 18:42:49 +02001695 u32 pipe_stats[I915_MAX_PIPES] = { };
Imre Deakc1874ed2014-02-04 21:35:46 +02001696 int pipe;
1697
Imre Deak58ead0d2014-02-04 21:35:47 +02001698 spin_lock(&dev_priv->irq_lock);
Damien Lespiau055e3932014-08-18 13:49:10 +01001699 for_each_pipe(dev_priv, pipe) {
Imre Deak91d181d2014-02-10 18:42:49 +02001700 int reg;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001701 u32 mask, iir_bit = 0;
Imre Deak91d181d2014-02-10 18:42:49 +02001702
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001703 /*
1704 * PIPESTAT bits get signalled even when the interrupt is
1705 * disabled with the mask bits, and some of the status bits do
1706 * not generate interrupts at all (like the underrun bit). Hence
1707 * we need to be careful that we only handle what we want to
1708 * handle.
1709 */
Daniel Vetter0f239f42014-09-30 10:56:49 +02001710
1711 /* fifo underruns are filtered in the underrun handler. */
1712 mask = PIPE_FIFO_UNDERRUN_STATUS;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001713
1714 switch (pipe) {
1715 case PIPE_A:
1716 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1717 break;
1718 case PIPE_B:
1719 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1720 break;
Ville Syrjälä3278f672014-04-09 13:28:49 +03001721 case PIPE_C:
1722 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1723 break;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001724 }
1725 if (iir & iir_bit)
1726 mask |= dev_priv->pipestat_irq_mask[pipe];
1727
1728 if (!mask)
Imre Deak91d181d2014-02-10 18:42:49 +02001729 continue;
1730
1731 reg = PIPESTAT(pipe);
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001732 mask |= PIPESTAT_INT_ENABLE_MASK;
1733 pipe_stats[pipe] = I915_READ(reg) & mask;
Imre Deakc1874ed2014-02-04 21:35:46 +02001734
1735 /*
1736 * Clear the PIPE*STAT regs before the IIR
1737 */
Imre Deak91d181d2014-02-10 18:42:49 +02001738 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1739 PIPESTAT_INT_STATUS_MASK))
Imre Deakc1874ed2014-02-04 21:35:46 +02001740 I915_WRITE(reg, pipe_stats[pipe]);
1741 }
Imre Deak58ead0d2014-02-04 21:35:47 +02001742 spin_unlock(&dev_priv->irq_lock);
Imre Deakc1874ed2014-02-04 21:35:46 +02001743
Damien Lespiau055e3932014-08-18 13:49:10 +01001744 for_each_pipe(dev_priv, pipe) {
Chris Wilsond6bbafa2014-09-05 07:13:24 +01001745 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1746 intel_pipe_handle_vblank(dev, pipe))
1747 intel_check_page_flip(dev, pipe);
Imre Deakc1874ed2014-02-04 21:35:46 +02001748
Imre Deak579a9b02014-02-04 21:35:48 +02001749 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
Imre Deakc1874ed2014-02-04 21:35:46 +02001750 intel_prepare_page_flip(dev, pipe);
1751 intel_finish_page_flip(dev, pipe);
1752 }
1753
1754 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1755 i9xx_pipe_crc_irq_handler(dev, pipe);
1756
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001757 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1758 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Imre Deakc1874ed2014-02-04 21:35:46 +02001759 }
1760
1761 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1762 gmbus_irq_handler(dev);
1763}
1764
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001765static void i9xx_hpd_irq_handler(struct drm_device *dev)
1766{
1767 struct drm_i915_private *dev_priv = dev->dev_private;
1768 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1769
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001770 if (hotplug_status) {
1771 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1772 /*
1773 * Make sure hotplug status is cleared before we clear IIR, or else we
1774 * may miss hotplug events.
1775 */
1776 POSTING_READ(PORT_HOTPLUG_STAT);
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001777
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001778 if (IS_G4X(dev)) {
1779 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001780
Dave Airlie13cf5502014-06-18 11:29:35 +10001781 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001782 } else {
1783 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1784
Dave Airlie13cf5502014-06-18 11:29:35 +10001785 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001786 }
1787
1788 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1789 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1790 dp_aux_irq_handler(dev);
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001791 }
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001792}
1793
Daniel Vetterff1f5252012-10-02 15:10:55 +02001794static irqreturn_t valleyview_irq_handler(int irq, void *arg)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001795{
Daniel Vetter45a83f82014-05-12 19:17:55 +02001796 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03001797 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001798 u32 iir, gt_iir, pm_iir;
1799 irqreturn_t ret = IRQ_NONE;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001800
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001801 while (true) {
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001802 /* Find, clear, then process each source of interrupt */
1803
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001804 gt_iir = I915_READ(GTIIR);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001805 if (gt_iir)
1806 I915_WRITE(GTIIR, gt_iir);
1807
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001808 pm_iir = I915_READ(GEN6_PMIIR);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001809 if (pm_iir)
1810 I915_WRITE(GEN6_PMIIR, pm_iir);
1811
1812 iir = I915_READ(VLV_IIR);
1813 if (iir) {
1814 /* Consume port before clearing IIR or we'll miss events */
1815 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1816 i9xx_hpd_irq_handler(dev);
1817 I915_WRITE(VLV_IIR, iir);
1818 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001819
1820 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1821 goto out;
1822
1823 ret = IRQ_HANDLED;
1824
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001825 if (gt_iir)
1826 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanoni60611c12013-08-15 11:50:01 -03001827 if (pm_iir)
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001828 gen6_rps_irq_handler(dev_priv, pm_iir);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001829 /* Call regardless, as some status bits might not be
1830 * signalled in iir */
1831 valleyview_pipestat_irq_handler(dev, iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001832 }
1833
1834out:
1835 return ret;
1836}
1837
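/*
 * Cherryview pairs a gen8-style master control and GT interrupt layout
 * with the Valleyview display block, so the loop below mixes
 * gen8_gt_irq_handler() with VLV_IIR/pipestat processing.
 */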
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001838static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1839{
Daniel Vetter45a83f82014-05-12 19:17:55 +02001840 struct drm_device *dev = arg;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001841 struct drm_i915_private *dev_priv = dev->dev_private;
1842 u32 master_ctl, iir;
1843 irqreturn_t ret = IRQ_NONE;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001844
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001845 for (;;) {
1846 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1847 iir = I915_READ(VLV_IIR);
Ville Syrjälä3278f672014-04-09 13:28:49 +03001848
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001849 if (master_ctl == 0 && iir == 0)
1850 break;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001851
Oscar Mateo27b6c122014-06-16 16:11:00 +01001852 ret = IRQ_HANDLED;
1853
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001854 I915_WRITE(GEN8_MASTER_IRQ, 0);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001855
Oscar Mateo27b6c122014-06-16 16:11:00 +01001856 /* Find, clear, then process each source of interrupt */
1857
1858 if (iir) {
1859 /* Consume port before clearing IIR or we'll miss events */
1860 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1861 i9xx_hpd_irq_handler(dev);
1862 I915_WRITE(VLV_IIR, iir);
1863 }
1864
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001865 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001866
Oscar Mateo27b6c122014-06-16 16:11:00 +01001867 /* Call regardless, as some status bits might not be
1868 * signalled in iir */
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001869 valleyview_pipestat_irq_handler(dev, iir);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001870
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001871 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1872 POSTING_READ(GEN8_MASTER_IRQ);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001873 }
1874
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001875 return ret;
1876}
1877
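/*
 * South display (PCH) interrupt decoding: the ibx and cpt variants below
 * differ in bit layout but follow the same pattern - latch
 * PCH_PORT_HOTPLUG for the long/short pulse bits, feed the hotplug
 * triggers to intel_hpd_irq_handler(), then report the AUX, GMBUS,
 * audio, FDI and underrun events.
 */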
Adam Jackson23e81d62012-06-06 15:45:44 -04001878static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08001879{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001880 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001881 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001882 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Dave Airlie13cf5502014-06-18 11:29:35 +10001883 u32 dig_hotplug_reg;
Jesse Barnes776ad802011-01-04 15:09:39 -08001884
Dave Airlie13cf5502014-06-18 11:29:35 +10001885 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1886 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1887
1888 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
Daniel Vetter91d131d2013-06-27 17:52:14 +02001889
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001890 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1891 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1892 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08001893 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001894 port_name(port));
1895 }
Jesse Barnes776ad802011-01-04 15:09:39 -08001896
Daniel Vetterce99c252012-12-01 13:53:47 +01001897 if (pch_iir & SDE_AUX_MASK)
1898 dp_aux_irq_handler(dev);
1899
Jesse Barnes776ad802011-01-04 15:09:39 -08001900 if (pch_iir & SDE_GMBUS)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001901 gmbus_irq_handler(dev);
Jesse Barnes776ad802011-01-04 15:09:39 -08001902
1903 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1904 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1905
1906 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1907 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1908
1909 if (pch_iir & SDE_POISON)
1910 DRM_ERROR("PCH poison interrupt\n");
1911
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001912 if (pch_iir & SDE_FDI_MASK)
Damien Lespiau055e3932014-08-18 13:49:10 +01001913 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001914 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1915 pipe_name(pipe),
1916 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08001917
1918 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1919 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1920
1921 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1922 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1923
Jesse Barnes776ad802011-01-04 15:09:39 -08001924 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001925 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
Paulo Zanoni86642812013-04-12 17:57:57 -03001926
1927 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001928 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
Paulo Zanoni86642812013-04-12 17:57:57 -03001929}
1930
1931static void ivb_err_int_handler(struct drm_device *dev)
1932{
1933 struct drm_i915_private *dev_priv = dev->dev_private;
1934 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001935 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03001936
Paulo Zanonide032bf2013-04-12 17:57:58 -03001937 if (err_int & ERR_INT_POISON)
1938 DRM_ERROR("Poison interrupt\n");
1939
Damien Lespiau055e3932014-08-18 13:49:10 +01001940 for_each_pipe(dev_priv, pipe) {
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001941 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1942 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanoni86642812013-04-12 17:57:57 -03001943
Daniel Vetter5a69b892013-10-16 22:55:52 +02001944 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1945 if (IS_IVYBRIDGE(dev))
Daniel Vetter277de952013-10-18 16:37:07 +02001946 ivb_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001947 else
Daniel Vetter277de952013-10-18 16:37:07 +02001948 hsw_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001949 }
1950 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01001951
Paulo Zanoni86642812013-04-12 17:57:57 -03001952 I915_WRITE(GEN7_ERR_INT, err_int);
1953}
1954
1955static void cpt_serr_int_handler(struct drm_device *dev)
1956{
1957 struct drm_i915_private *dev_priv = dev->dev_private;
1958 u32 serr_int = I915_READ(SERR_INT);
1959
Paulo Zanonide032bf2013-04-12 17:57:58 -03001960 if (serr_int & SERR_INT_POISON)
1961 DRM_ERROR("PCH poison interrupt\n");
1962
Paulo Zanoni86642812013-04-12 17:57:57 -03001963 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001964 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
Paulo Zanoni86642812013-04-12 17:57:57 -03001965
1966 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001967 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
Paulo Zanoni86642812013-04-12 17:57:57 -03001968
1969 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001970 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
Paulo Zanoni86642812013-04-12 17:57:57 -03001971
1972 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08001973}
1974
Adam Jackson23e81d62012-06-06 15:45:44 -04001975static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1976{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001977 struct drm_i915_private *dev_priv = dev->dev_private;
Adam Jackson23e81d62012-06-06 15:45:44 -04001978 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001979 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Dave Airlie13cf5502014-06-18 11:29:35 +10001980 u32 dig_hotplug_reg;
Adam Jackson23e81d62012-06-06 15:45:44 -04001981
Dave Airlie13cf5502014-06-18 11:29:35 +10001982 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1983 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1984
1985 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
Daniel Vetter91d131d2013-06-27 17:52:14 +02001986
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001987 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1988 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1989 SDE_AUDIO_POWER_SHIFT_CPT);
1990 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1991 port_name(port));
1992 }
Adam Jackson23e81d62012-06-06 15:45:44 -04001993
1994 if (pch_iir & SDE_AUX_MASK_CPT)
Daniel Vetterce99c252012-12-01 13:53:47 +01001995 dp_aux_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001996
1997 if (pch_iir & SDE_GMBUS_CPT)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001998 gmbus_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001999
2000 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2001 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2002
2003 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2004 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2005
2006 if (pch_iir & SDE_FDI_MASK_CPT)
Damien Lespiau055e3932014-08-18 13:49:10 +01002007 for_each_pipe(dev_priv, pipe)
Adam Jackson23e81d62012-06-06 15:45:44 -04002008 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2009 pipe_name(pipe),
2010 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03002011
2012 if (pch_iir & SDE_ERROR_CPT)
2013 cpt_serr_int_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04002014}
2015
Paulo Zanonic008bc62013-07-12 16:35:10 -03002016static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2017{
2018 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter40da17c2013-10-21 18:04:36 +02002019 enum pipe pipe;
Paulo Zanonic008bc62013-07-12 16:35:10 -03002020
2021 if (de_iir & DE_AUX_CHANNEL_A)
2022 dp_aux_irq_handler(dev);
2023
2024 if (de_iir & DE_GSE)
2025 intel_opregion_asle_intr(dev);
2026
Paulo Zanonic008bc62013-07-12 16:35:10 -03002027 if (de_iir & DE_POISON)
2028 DRM_ERROR("Poison interrupt\n");
2029
Damien Lespiau055e3932014-08-18 13:49:10 +01002030 for_each_pipe(dev_priv, pipe) {
Chris Wilsond6bbafa2014-09-05 07:13:24 +01002031 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2032 intel_pipe_handle_vblank(dev, pipe))
2033 intel_check_page_flip(dev, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002034
Daniel Vetter40da17c2013-10-21 18:04:36 +02002035 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002036 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002037
Daniel Vetter40da17c2013-10-21 18:04:36 +02002038 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2039 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02002040
Daniel Vetter40da17c2013-10-21 18:04:36 +02002041 /* plane/pipes map 1:1 on ilk+ */
2042 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2043 intel_prepare_page_flip(dev, pipe);
2044 intel_finish_page_flip_plane(dev, pipe);
2045 }
Paulo Zanonic008bc62013-07-12 16:35:10 -03002046 }
2047
2048 /* check event from PCH */
2049 if (de_iir & DE_PCH_EVENT) {
2050 u32 pch_iir = I915_READ(SDEIIR);
2051
2052 if (HAS_PCH_CPT(dev))
2053 cpt_irq_handler(dev, pch_iir);
2054 else
2055 ibx_irq_handler(dev, pch_iir);
2056
2057 /* should clear PCH hotplug event before clear CPU irq */
2058 I915_WRITE(SDEIIR, pch_iir);
2059 }
2060
2061 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2062 ironlake_rps_change_irq_handler(dev);
2063}
2064
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002065static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2066{
2067 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau07d27e22014-03-03 17:31:46 +00002068 enum pipe pipe;
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002069
2070 if (de_iir & DE_ERR_INT_IVB)
2071 ivb_err_int_handler(dev);
2072
2073 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2074 dp_aux_irq_handler(dev);
2075
2076 if (de_iir & DE_GSE_IVB)
2077 intel_opregion_asle_intr(dev);
2078
Damien Lespiau055e3932014-08-18 13:49:10 +01002079 for_each_pipe(dev_priv, pipe) {
Chris Wilsond6bbafa2014-09-05 07:13:24 +01002080 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2081 intel_pipe_handle_vblank(dev, pipe))
2082 intel_check_page_flip(dev, pipe);
Daniel Vetter40da17c2013-10-21 18:04:36 +02002083
2084 /* plane/pipes map 1:1 on ilk+ */
Damien Lespiau07d27e22014-03-03 17:31:46 +00002085 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2086 intel_prepare_page_flip(dev, pipe);
2087 intel_finish_page_flip_plane(dev, pipe);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002088 }
2089 }
2090
2091 /* check event from PCH */
2092 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2093 u32 pch_iir = I915_READ(SDEIIR);
2094
2095 cpt_irq_handler(dev, pch_iir);
2096
2097 /* clear PCH hotplug event before clear CPU irq */
2098 I915_WRITE(SDEIIR, pch_iir);
2099 }
2100}
2101
Oscar Mateo72c90f62014-06-16 16:10:57 +01002102/*
2103 * To handle irqs with the minimum potential races with fresh interrupts, we:
2104 * 1 - Disable Master Interrupt Control.
2105 * 2 - Find the source(s) of the interrupt.
2106 * 3 - Clear the Interrupt Identity bits (IIR).
2107 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2108 * 5 - Re-enable Master Interrupt Control.
2109 */
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002110static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002111{
Daniel Vetter45a83f82014-05-12 19:17:55 +02002112 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03002113 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002114 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01002115 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002116
Paulo Zanoni86642812013-04-12 17:57:57 -03002117 /* We get interrupts on unclaimed registers, so check for this before we
2118 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01002119 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03002120
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002121 /* disable master interrupt before clearing iir */
2122 de_ier = I915_READ(DEIER);
2123 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03002124 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01002125
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002126 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2127 * interrupts will be stored on its back queue, and then we'll be
2128 * able to process them after we restore SDEIER (as soon as we restore
2129 * it, we'll get an interrupt if SDEIIR still has something to process
2130 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07002131 if (!HAS_PCH_NOP(dev)) {
2132 sde_ier = I915_READ(SDEIER);
2133 I915_WRITE(SDEIER, 0);
2134 POSTING_READ(SDEIER);
2135 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002136
Oscar Mateo72c90f62014-06-16 16:10:57 +01002137 /* Find, clear, then process each source of interrupt */
2138
Chris Wilson0e434062012-05-09 21:45:44 +01002139 gt_iir = I915_READ(GTIIR);
2140 if (gt_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002141 I915_WRITE(GTIIR, gt_iir);
2142 ret = IRQ_HANDLED;
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03002143 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002144 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03002145 else
2146 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002147 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002148
2149 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01002150 if (de_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002151 I915_WRITE(DEIIR, de_iir);
2152 ret = IRQ_HANDLED;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002153 if (INTEL_INFO(dev)->gen >= 7)
2154 ivb_display_irq_handler(dev, de_iir);
2155 else
2156 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002157 }
2158
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002159 if (INTEL_INFO(dev)->gen >= 6) {
2160 u32 pm_iir = I915_READ(GEN6_PMIIR);
2161 if (pm_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002162 I915_WRITE(GEN6_PMIIR, pm_iir);
2163 ret = IRQ_HANDLED;
Oscar Mateo72c90f62014-06-16 16:10:57 +01002164 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002165 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002166 }
2167
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002168 I915_WRITE(DEIER, de_ier);
2169 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07002170 if (!HAS_PCH_NOP(dev)) {
2171 I915_WRITE(SDEIER, sde_ier);
2172 POSTING_READ(SDEIER);
2173 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002174
2175 return ret;
2176}
2177
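/*
 * Gen8 routes everything through a single master control register that
 * both summarizes the pending sources and gates delivery: the handler
 * below disables it by writing 0, walks the GT/DE/PCH sources indicated
 * by the set bits, and re-arms it with GEN8_MASTER_IRQ_CONTROL at the
 * end.
 */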
Ben Widawskyabd58f02013-11-02 21:07:09 -07002178static irqreturn_t gen8_irq_handler(int irq, void *arg)
2179{
2180 struct drm_device *dev = arg;
2181 struct drm_i915_private *dev_priv = dev->dev_private;
2182 u32 master_ctl;
2183 irqreturn_t ret = IRQ_NONE;
2184 uint32_t tmp = 0;
Daniel Vetterc42664c2013-11-07 11:05:40 +01002185 enum pipe pipe;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002186
Ben Widawskyabd58f02013-11-02 21:07:09 -07002187 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2188 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2189 if (!master_ctl)
2190 return IRQ_NONE;
2191
2192 I915_WRITE(GEN8_MASTER_IRQ, 0);
2193 POSTING_READ(GEN8_MASTER_IRQ);
2194
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002195 /* Find, clear, then process each source of interrupt */
2196
Ben Widawskyabd58f02013-11-02 21:07:09 -07002197 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2198
2199 if (master_ctl & GEN8_DE_MISC_IRQ) {
2200 tmp = I915_READ(GEN8_DE_MISC_IIR);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002201 if (tmp) {
2202 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2203 ret = IRQ_HANDLED;
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002204 if (tmp & GEN8_DE_MISC_GSE)
2205 intel_opregion_asle_intr(dev);
2206 else
2207 DRM_ERROR("Unexpected DE Misc interrupt\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002208 } else
2209 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002211 }
2212
Daniel Vetter6d766f02013-11-07 14:49:55 +01002213 if (master_ctl & GEN8_DE_PORT_IRQ) {
2214 tmp = I915_READ(GEN8_DE_PORT_IIR);
Daniel Vetter6d766f02013-11-07 14:49:55 +01002215 if (tmp) {
2216 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2217 ret = IRQ_HANDLED;
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002218 if (tmp & GEN8_AUX_CHANNEL_A)
2219 dp_aux_irq_handler(dev);
2220 else
2221 DRM_ERROR("Unexpected DE Port interrupt\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002222 } else
2223 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002225 }
2226
Damien Lespiau055e3932014-08-18 13:49:10 +01002227 for_each_pipe(dev_priv, pipe) {
Damien Lespiau770de832014-03-20 20:45:01 +00002228 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002229
Daniel Vetterc42664c2013-11-07 11:05:40 +01002230 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2231 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002232
Daniel Vetterc42664c2013-11-07 11:05:40 +01002233 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
Daniel Vetterc42664c2013-11-07 11:05:40 +01002234 if (pipe_iir) {
2235 ret = IRQ_HANDLED;
2236 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
Damien Lespiau770de832014-03-20 20:45:01 +00002237
Chris Wilsond6bbafa2014-09-05 07:13:24 +01002238 if (pipe_iir & GEN8_PIPE_VBLANK &&
2239 intel_pipe_handle_vblank(dev, pipe))
2240 intel_check_page_flip(dev, pipe);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002241
Damien Lespiau770de832014-03-20 20:45:01 +00002242 if (IS_GEN9(dev))
2243 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2244 else
2245 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2246
2247 if (flip_done) {
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002248 intel_prepare_page_flip(dev, pipe);
2249 intel_finish_page_flip_plane(dev, pipe);
2250 }
2251
2252 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2253 hsw_pipe_crc_irq_handler(dev, pipe);
2254
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002255 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2256 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2257 pipe);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002258
2260 if (IS_GEN9(dev))
2261 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2262 else
2263 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2264
2265 if (fault_errors)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002266 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2267 pipe_name(pipe),
2268 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
Daniel Vetterc42664c2013-11-07 11:05:40 +01002269 } else
Ben Widawskyabd58f02013-11-02 21:07:09 -07002270 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2271 }
2272
Daniel Vetter92d03a82013-11-07 11:05:43 +01002273 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2274 /*
2275 * FIXME(BDW): Assume for now that the new interrupt handling
2276 * scheme also closed the SDE interrupt handling race we've seen
2277 * on older pch-split platforms. But this needs testing.
2278 */
2279 u32 pch_iir = I915_READ(SDEIIR);
Daniel Vetter92d03a82013-11-07 11:05:43 +01002280 if (pch_iir) {
2281 I915_WRITE(SDEIIR, pch_iir);
2282 ret = IRQ_HANDLED;
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002283 cpt_irq_handler(dev, pch_iir);
2284 } else
2285 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2286
Daniel Vetter92d03a82013-11-07 11:05:43 +01002287 }
2288
Ben Widawskyabd58f02013-11-02 21:07:09 -07002289 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2290 POSTING_READ(GEN8_MASTER_IRQ);
2291
2292 return ret;
2293}
2294
Daniel Vetter17e1df02013-09-08 21:57:13 +02002295static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2296 bool reset_completed)
2297{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002298 struct intel_engine_cs *ring;
Daniel Vetter17e1df02013-09-08 21:57:13 +02002299 int i;
2300
2301 /*
2302 * Notify all waiters for GPU completion events that reset state has
2303 * been changed, and that they need to restart their wait after
2304 * checking for potential errors (and bail out to drop locks if there is
2305 * a gpu reset pending so that i915_error_work_func can acquire them).
2306 */
2307
2308 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2309 for_each_ring(ring, dev_priv, i)
2310 wake_up_all(&ring->irq_queue);
2311
2312 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2313 wake_up_all(&dev_priv->pending_flip_queue);
2314
2315 /*
2316 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2317 * reset state is cleared.
2318 */
2319 if (reset_completed)
2320 wake_up_all(&dev_priv->gpu_error.reset_queue);
2321}
2322
Jesse Barnes8a905232009-07-11 16:48:03 -04002323/**
2324 * i915_error_work_func - do process context error handling work
2325 * @work: work struct
2326 *
2327 * Fire an error uevent so userspace can see that a hang or error
2328 * was detected.
2329 */
2330static void i915_error_work_func(struct work_struct *work)
2331{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002332 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2333 work);
Jani Nikula2d1013d2014-03-31 14:27:17 +03002334 struct drm_i915_private *dev_priv =
2335 container_of(error, struct drm_i915_private, gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04002336 struct drm_device *dev = dev_priv->dev;
Ben Widawskycce723e2013-07-19 09:16:42 -07002337 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2338 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2339 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetter17e1df02013-09-08 21:57:13 +02002340 int ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04002341
Dave Airlie5bdebb12013-10-11 14:07:25 +10002342 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002343
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002344 /*
2345 * Note that there's only one work item which does gpu resets, so we
2346 * need not worry about concurrent gpu resets potentially incrementing
2347 * error->reset_counter twice. We only need to take care of another
2348 * racing irq/hangcheck declaring the gpu dead for a second time. A
2349 * quick check for that is good enough: schedule_work ensures the
2350 * correct ordering between hang detection and this work item, and since
2351 * the reset in-progress bit is only ever set by code outside of this
2352 * work we don't need to worry about any other races.
2353 */
2354 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01002355 DRM_DEBUG_DRIVER("resetting chip\n");
Dave Airlie5bdebb12013-10-11 14:07:25 +10002356 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002357 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002358
Daniel Vetter17e1df02013-09-08 21:57:13 +02002359 /*
Imre Deakf454c692014-04-23 01:09:04 +03002360 * In most cases it's guaranteed that we get here with an RPM
2361 * reference held, for example because there is a pending GPU
2362 * request that won't finish until the reset is done. This
2363 * isn't the case at least when we get here by doing a
2364 * simulated reset via debugfs, so get an RPM reference.
2365 */
2366 intel_runtime_pm_get(dev_priv);
2367 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002368 * All state reset _must_ be completed before we update the
2369 * reset counter, for otherwise waiters might miss the reset
2370 * pending state and not properly drop locks, resulting in
2371 * deadlocks with the reset work.
2372 */
Daniel Vetterf69061b2012-12-06 09:01:42 +01002373 ret = i915_reset(dev);
2374
Daniel Vetter17e1df02013-09-08 21:57:13 +02002375 intel_display_handle_reset(dev);
2376
Imre Deakf454c692014-04-23 01:09:04 +03002377 intel_runtime_pm_put(dev_priv);
2378
Daniel Vetterf69061b2012-12-06 09:01:42 +01002379 if (ret == 0) {
2380 /*
2381 * After all the gem state is reset, increment the reset
2382 * counter and wake up everyone waiting for the reset to
2383 * complete.
2384 *
2385 * Since unlock operations are a one-sided barrier only,
2386 * we need to insert a barrier here to order any seqno
 2387 * updates before the counter increment.
2389 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002390 smp_mb__before_atomic();
Daniel Vetterf69061b2012-12-06 09:01:42 +01002391 atomic_inc(&dev_priv->gpu_error.reset_counter);
2392
Dave Airlie5bdebb12013-10-11 14:07:25 +10002393 kobject_uevent_env(&dev->primary->kdev->kobj,
Daniel Vetterf69061b2012-12-06 09:01:42 +01002394 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002395 } else {
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02002396 atomic_set_mask(I915_WEDGED, &error->reset_counter);
Ben Gamarif316a422009-09-14 17:48:46 -04002397 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002398
Daniel Vetter17e1df02013-09-08 21:57:13 +02002399 /*
2400 * Note: The wake_up also serves as a memory barrier so that
 2401 * waiters see the updated value of the reset counter atomic_t.
2402 */
2403 i915_error_wake_up(dev_priv, true);
Ben Gamarif316a422009-09-14 17:48:46 -04002404 }
Jesse Barnes8a905232009-07-11 16:48:03 -04002405}
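/*
 * For reference, a sketch of how the reset counter set up above is read
 * back by waiters; the real helpers live in i915_drv.h, these only mirror
 * the flag usage visible in this file (names prefixed example_ on purpose).
 */
static inline bool example_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool example_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}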
2406
Chris Wilson35aed2e2010-05-27 13:18:12 +01002407static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04002408{
2409 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07002410 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04002411 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07002412 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04002413
Chris Wilson35aed2e2010-05-27 13:18:12 +01002414 if (!eir)
2415 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04002416
Joe Perchesa70491c2012-03-18 13:00:11 -07002417 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002418
Ben Widawskybd9854f2012-08-23 15:18:09 -07002419 i915_get_extra_instdone(dev, instdone);
2420
Jesse Barnes8a905232009-07-11 16:48:03 -04002421 if (IS_G4X(dev)) {
2422 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2423 u32 ipeir = I915_READ(IPEIR_I965);
2424
Joe Perchesa70491c2012-03-18 13:00:11 -07002425 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2426 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002427 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2428 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002429 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002430 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002431 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002432 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002433 }
2434 if (eir & GM45_ERROR_PAGE_TABLE) {
2435 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002436 pr_err("page table error\n");
2437 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002438 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002439 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002440 }
2441 }
2442
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002443 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002444 if (eir & I915_ERROR_PAGE_TABLE) {
2445 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002446 pr_err("page table error\n");
2447 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002448 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002449 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002450 }
2451 }
2452
2453 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002454 pr_err("memory refresh error:\n");
Damien Lespiau055e3932014-08-18 13:49:10 +01002455 for_each_pipe(dev_priv, pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002456 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002457 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002458 /* pipestat has already been acked */
2459 }
2460 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002461 pr_err("instruction error\n");
2462 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002463 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2464 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002465 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002466 u32 ipeir = I915_READ(IPEIR);
2467
Joe Perchesa70491c2012-03-18 13:00:11 -07002468 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2469 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002470 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002471 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002472 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002473 } else {
2474 u32 ipeir = I915_READ(IPEIR_I965);
2475
Joe Perchesa70491c2012-03-18 13:00:11 -07002476 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2477 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002478 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002479 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002480 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002481 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002482 }
2483 }
2484
2485 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002486 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002487 eir = I915_READ(EIR);
2488 if (eir) {
2489 /*
2490 * some errors might have become stuck,
2491 * mask them.
2492 */
2493 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2494 I915_WRITE(EMR, I915_READ(EMR) | eir);
2495 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2496 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002497}
2498
2499/**
2500 * i915_handle_error - handle an error interrupt
2501 * @dev: drm device
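 * @wedged: if true, mark a reset as pending and kick off the reset work
 * @fmt: printf-style format string describing the error (plus varargs)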
2502 *
 2503 * Do some basic checking of register state at error interrupt time and
2504 * dump it to the syslog. Also call i915_capture_error_state() to make
2505 * sure we get a record and make it available in debugfs. Fire a uevent
2506 * so userspace knows something bad happened (should trigger collection
2507 * of a ring dump etc.).
2508 */
Mika Kuoppala58174462014-02-25 17:11:26 +02002509void i915_handle_error(struct drm_device *dev, bool wedged,
2510 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002511{
2512 struct drm_i915_private *dev_priv = dev->dev_private;
Mika Kuoppala58174462014-02-25 17:11:26 +02002513 va_list args;
2514 char error_msg[80];
Chris Wilson35aed2e2010-05-27 13:18:12 +01002515
Mika Kuoppala58174462014-02-25 17:11:26 +02002516 va_start(args, fmt);
2517 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2518 va_end(args);
2519
2520 i915_capture_error_state(dev, wedged, error_msg);
Chris Wilson35aed2e2010-05-27 13:18:12 +01002521 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002522
Ben Gamariba1234d2009-09-14 17:48:47 -04002523 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01002524 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2525 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002526
Ben Gamari11ed50e2009-09-14 17:48:45 -04002527 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002528 * Wakeup waiting processes so that the reset work function
2529 * i915_error_work_func doesn't deadlock trying to grab various
2530 * locks. By bumping the reset counter first, the woken
2531 * processes will see a reset in progress and back off,
2532 * releasing their locks and then wait for the reset completion.
2533 * We must do this for _all_ gpu waiters that might hold locks
2534 * that the reset work needs to acquire.
2535 *
2536 * Note: The wake_up serves as the required memory barrier to
2537 * ensure that the waiters see the updated value of the reset
2538 * counter atomic_t.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002539 */
Daniel Vetter17e1df02013-09-08 21:57:13 +02002540 i915_error_wake_up(dev_priv, false);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002541 }
2542
Daniel Vetter122f46b2013-09-04 17:36:14 +02002543 /*
2544 * Our reset work can grab modeset locks (since it needs to reset the
 2545 * state of outstanding pageflips). Hence it must not be run on our own
 2546 * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
2547 * code will deadlock.
2548 */
2549 schedule_work(&dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04002550}
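/*
 * Usage sketch: callers below pass wedged=false just to log and kick a
 * stuck ring (e.g. "Kicking stuck wait on %s") and wedged=true once a
 * ring is declared hung, which marks the reset as pending and schedules
 * i915_error_work_func via the gpu_error work item.
 */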
2551
Keith Packard42f52ef2008-10-18 19:39:29 -07002552/* Called from drm generic code, passed 'crtc' which
2553 * we use as a pipe index
2554 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002555static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002556{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002557 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002558 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002559
Chris Wilson5eddb702010-09-11 13:48:45 +01002560 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002561 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002562
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002563 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002564 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002565 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002566 PIPE_START_VBLANK_INTERRUPT_STATUS);
Keith Packarde9d21d72008-10-16 11:31:38 -07002567 else
Keith Packard7c463582008-11-04 02:03:27 -08002568 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002569 PIPE_VBLANK_INTERRUPT_STATUS);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002570 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002571
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002572 return 0;
2573}
2574
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002575static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002576{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002577 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002578 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002579 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002580 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002581
2582 if (!i915_pipe_enabled(dev, pipe))
2583 return -EINVAL;
2584
2585 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002586 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002587 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2588
2589 return 0;
2590}
2591
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002592static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2593{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002594 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002595 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002596
2597 if (!i915_pipe_enabled(dev, pipe))
2598 return -EINVAL;
2599
2600 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002601 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002602 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002603 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2604
2605 return 0;
2606}
2607
Ben Widawskyabd58f02013-11-02 21:07:09 -07002608static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2609{
2610 struct drm_i915_private *dev_priv = dev->dev_private;
2611 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002612
2613 if (!i915_pipe_enabled(dev, pipe))
2614 return -EINVAL;
2615
2616 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002617 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2618 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2619 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002620 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2621 return 0;
2622}
2623
Keith Packard42f52ef2008-10-18 19:39:29 -07002624/* Called from drm generic code, passed 'crtc' which
2625 * we use as a pipe index
2626 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002627static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002628{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002629 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002630 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002631
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002632 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002633 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002634 PIPE_VBLANK_INTERRUPT_STATUS |
2635 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002636 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2637}
2638
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002639static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002640{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002641 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002642 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002643 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002644 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002645
2646 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002647 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649}
2650
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002651static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2652{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002653 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002654 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002655
2656 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002657 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002658 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002659 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2660}
2661
Ben Widawskyabd58f02013-11-02 21:07:09 -07002662static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2663{
2664 struct drm_i915_private *dev_priv = dev->dev_private;
2665 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002666
2667 if (!i915_pipe_enabled(dev, pipe))
2668 return;
2669
2670 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002671 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2672 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2673 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002674 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2675}
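/*
 * The gen8 vblank enable/disable pair above shares one pattern; a hedged
 * sketch (helper name illustrative), relying on the IMR convention that
 * a set bit masks the interrupt:
 */
static void example_gen8_pipe_vblank_update(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool enable)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; /* unmask */
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;  /* mask */

	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); /* flush the posted write */
}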
2676
Chris Wilson893eead2010-10-27 14:44:35 +01002677static u32
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002678ring_last_seqno(struct intel_engine_cs *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08002679{
Chris Wilson893eead2010-10-27 14:44:35 +01002680 return list_entry(ring->request_list.prev,
2681 struct drm_i915_gem_request, list)->seqno;
2682}
2683
Chris Wilson9107e9d2013-06-10 11:20:20 +01002684static bool
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002685ring_idle(struct intel_engine_cs *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002686{
Chris Wilson9107e9d2013-06-10 11:20:20 +01002687 return (list_empty(&ring->request_list) ||
2688 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04002689}
2690
Daniel Vettera028c4b2014-03-15 00:08:56 +01002691static bool
2692ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2693{
2694 if (INTEL_INFO(dev)->gen >= 8) {
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002695 return (ipehr >> 23) == 0x1c;
Daniel Vettera028c4b2014-03-15 00:08:56 +01002696 } else {
2697 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2698 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2699 MI_SEMAPHORE_REGISTER);
2700 }
2701}
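/*
 * Note on the gen8 check above: MI command headers carry the opcode in
 * bits 28:23 (bits 31:29 are zero for MI commands), so ipehr >> 23 == 0x1c
 * matches MI_SEMAPHORE_WAIT; pre-gen8 instead compares the full
 * MI_SEMAPHORE_MBOX pattern with the per-ring sync bits masked off.
 */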
2702
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002703static struct intel_engine_cs *
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002704semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
Daniel Vetter921d42e2014-03-18 10:26:04 +01002705{
2706 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002707 struct intel_engine_cs *signaller;
Daniel Vetter921d42e2014-03-18 10:26:04 +01002708 int i;
2709
2710 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002711 for_each_ring(signaller, dev_priv, i) {
2712 if (ring == signaller)
2713 continue;
2714
2715 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2716 return signaller;
2717 }
Daniel Vetter921d42e2014-03-18 10:26:04 +01002718 } else {
2719 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2720
2721 for_each_ring(signaller, dev_priv, i) {
 2722		if (ring == signaller)
2723 continue;
2724
Ben Widawskyebc348b2014-04-29 14:52:28 -07002725 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
Daniel Vetter921d42e2014-03-18 10:26:04 +01002726 return signaller;
2727 }
2728 }
2729
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002730 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2731 ring->id, ipehr, offset);
Daniel Vetter921d42e2014-03-18 10:26:04 +01002732
2733 return NULL;
2734}
2735
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002736static struct intel_engine_cs *
2737semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002738{
2739 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002740 u32 cmd, ipehr, head;
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002741 u64 offset = 0;
2742 int i, backwards;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002743
2744 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
Daniel Vettera028c4b2014-03-15 00:08:56 +01002745 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
Chris Wilson6274f212013-06-10 11:20:21 +01002746 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002747
Daniel Vetter88fe4292014-03-15 00:08:55 +01002748 /*
2749 * HEAD is likely pointing to the dword after the actual command,
2750 * so scan backwards until we find the MBOX. But limit it to just 3
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002751 * or 4 dwords depending on the semaphore wait command size.
2752 * Note that we don't care about ACTHD here since that might
Daniel Vetter88fe4292014-03-15 00:08:55 +01002753 * point at a batch, and semaphores are always emitted into the
2754 * ringbuffer itself.
Chris Wilsona24a11e2013-03-14 17:52:05 +02002755 */
Daniel Vetter88fe4292014-03-15 00:08:55 +01002756 head = I915_READ_HEAD(ring) & HEAD_ADDR;
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002757 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002758
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002759 for (i = backwards; i; --i) {
Daniel Vetter88fe4292014-03-15 00:08:55 +01002760 /*
2761 * Be paranoid and presume the hw has gone off into the wild -
2762 * our ring is smaller than what the hardware (and hence
2763 * HEAD_ADDR) allows. Also handles wrap-around.
2764 */
Oscar Mateoee1b1e52014-05-22 14:13:35 +01002765 head &= ring->buffer->size - 1;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002766
2767 /* This here seems to blow up */
Oscar Mateoee1b1e52014-05-22 14:13:35 +01002768 cmd = ioread32(ring->buffer->virtual_start + head);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002769 if (cmd == ipehr)
2770 break;
2771
Daniel Vetter88fe4292014-03-15 00:08:55 +01002772 head -= 4;
2773 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002774
Daniel Vetter88fe4292014-03-15 00:08:55 +01002775 if (!i)
2776 return NULL;
2777
Oscar Mateoee1b1e52014-05-22 14:13:35 +01002778 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
Rodrigo Vivia6cdb932014-06-30 09:53:39 -07002779 if (INTEL_INFO(ring->dev)->gen >= 8) {
2780 offset = ioread32(ring->buffer->virtual_start + head + 12);
2781 offset <<= 32;
 2782		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2783 }
2784 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002785}
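/*
 * For reference, the dword layout the loop above decodes once it finds
 * the wait command (offsets relative to the matched dword, gen8 case):
 *   +0:  the semaphore wait header itself (compared against IPEHR)
 *   +4:  target seqno (stored as seqno - 1, hence the +1 above)
 *   +8:  signaller ggtt offset, low dword
 *   +12: signaller ggtt offset, high dword
 */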
2786
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002787static int semaphore_passed(struct intel_engine_cs *ring)
Chris Wilson6274f212013-06-10 11:20:21 +01002788{
2789 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002790 struct intel_engine_cs *signaller;
Chris Wilsona0d036b2014-07-19 12:40:42 +01002791 u32 seqno;
Chris Wilson6274f212013-06-10 11:20:21 +01002792
Chris Wilson4be17382014-06-06 10:22:29 +01002793 ring->hangcheck.deadlock++;
Chris Wilson6274f212013-06-10 11:20:21 +01002794
2795 signaller = semaphore_waits_for(ring, &seqno);
Chris Wilson4be17382014-06-06 10:22:29 +01002796 if (signaller == NULL)
2797 return -1;
2798
2799 /* Prevent pathological recursion due to driver bugs */
2800 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
Chris Wilson6274f212013-06-10 11:20:21 +01002801 return -1;
2802
Chris Wilson4be17382014-06-06 10:22:29 +01002803 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2804 return 1;
2805
Chris Wilsona0d036b2014-07-19 12:40:42 +01002806 /* cursory check for an unkickable deadlock */
2807 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2808 semaphore_passed(signaller) < 0)
Chris Wilson4be17382014-06-06 10:22:29 +01002809 return -1;
2810
2811 return 0;
Chris Wilson6274f212013-06-10 11:20:21 +01002812}
2813
2814static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2815{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002816 struct intel_engine_cs *ring;
Chris Wilson6274f212013-06-10 11:20:21 +01002817 int i;
2818
2819 for_each_ring(ring, dev_priv, i)
Chris Wilson4be17382014-06-06 10:22:29 +01002820 ring->hangcheck.deadlock = 0;
Chris Wilson6274f212013-06-10 11:20:21 +01002821}
2822
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002823static enum intel_ring_hangcheck_action
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002824ring_stuck(struct intel_engine_cs *ring, u64 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002825{
2826 struct drm_device *dev = ring->dev;
2827 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002828 u32 tmp;
2829
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03002830 if (acthd != ring->hangcheck.acthd) {
2831 if (acthd > ring->hangcheck.max_acthd) {
2832 ring->hangcheck.max_acthd = acthd;
2833 return HANGCHECK_ACTIVE;
2834 }
2835
2836 return HANGCHECK_ACTIVE_LOOP;
2837 }
Chris Wilson6274f212013-06-10 11:20:21 +01002838
Chris Wilson9107e9d2013-06-10 11:20:20 +01002839 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002840 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002841
2842 /* Is the chip hanging on a WAIT_FOR_EVENT?
2843 * If so we can simply poke the RB_WAIT bit
2844 * and break the hang. This should work on
2845 * all but the second generation chipsets.
2846 */
2847 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002848 if (tmp & RING_WAIT) {
Mika Kuoppala58174462014-02-25 17:11:26 +02002849 i915_handle_error(dev, false,
2850 "Kicking stuck wait on %s",
2851 ring->name);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002852 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002853 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002854 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002855
Chris Wilson6274f212013-06-10 11:20:21 +01002856 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2857 switch (semaphore_passed(ring)) {
2858 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002859 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002860 case 1:
Mika Kuoppala58174462014-02-25 17:11:26 +02002861 i915_handle_error(dev, false,
2862 "Kicking stuck semaphore on %s",
2863 ring->name);
Chris Wilson6274f212013-06-10 11:20:21 +01002864 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002865 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002866 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002867 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01002868 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002869 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002870
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002871 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002872}
2873
Ben Gamarif65d9422009-09-14 17:48:44 -04002874/**
2875 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002876 * batchbuffers in a long time. We keep track of per-ring seqno progress;
 2877 * if there is no progress, the hangcheck score for that ring is increased.
 2878 * Further, acthd is inspected to see if the ring is stuck. If it is, we
 2879 * kick the ring. If we see no progress on three subsequent calls
 2880 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04002881 */
Damien Lespiaua658b5d2013-08-08 22:28:56 +01002882static void i915_hangcheck_elapsed(unsigned long data)
Ben Gamarif65d9422009-09-14 17:48:44 -04002883{
2884 struct drm_device *dev = (struct drm_device *)data;
Jani Nikula2d1013d2014-03-31 14:27:17 +03002885 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002886 struct intel_engine_cs *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01002887 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002888 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002889 bool stuck[I915_NUM_RINGS] = { 0 };
2890#define BUSY 1
2891#define KICK 5
2892#define HUNG 20
Chris Wilson893eead2010-10-27 14:44:35 +01002893
Jani Nikulad330a952014-01-21 11:24:25 +02002894 if (!i915.enable_hangcheck)
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002895 return;
2896
Chris Wilsonb4519512012-05-11 14:29:30 +01002897 for_each_ring(ring, dev_priv, i) {
Chris Wilson50877442014-03-21 12:41:53 +00002898 u64 acthd;
2899 u32 seqno;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002900 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002901
Chris Wilson6274f212013-06-10 11:20:21 +01002902 semaphore_clear_deadlocks(dev_priv);
2903
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002904 seqno = ring->get_seqno(ring, false);
2905 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002906
Chris Wilson9107e9d2013-06-10 11:20:20 +01002907 if (ring->hangcheck.seqno == seqno) {
2908 if (ring_idle(ring, seqno)) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002909 ring->hangcheck.action = HANGCHECK_IDLE;
2910
Chris Wilson9107e9d2013-06-10 11:20:20 +01002911 if (waitqueue_active(&ring->irq_queue)) {
2912 /* Issue a wake-up to catch stuck h/w. */
Chris Wilson094f9a52013-09-25 17:34:55 +01002913 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
Daniel Vetterf4adcd22013-10-28 09:24:13 +01002914 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2915 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2916 ring->name);
2917 else
2918 DRM_INFO("Fake missed irq on %s\n",
2919 ring->name);
Chris Wilson094f9a52013-09-25 17:34:55 +01002920 wake_up_all(&ring->irq_queue);
2921 }
2922 /* Safeguard against driver failure */
2923 ring->hangcheck.score += BUSY;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002924 } else
2925 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002926 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01002927 /* We always increment the hangcheck score
2928 * if the ring is busy and still processing
2929 * the same request, so that no single request
2930 * can run indefinitely (such as a chain of
2931 * batches). The only time we do not increment
 2932			 * the hangcheck score on this ring is if this
2933 * ring is in a legitimate wait for another
2934 * ring. In that case the waiting ring is a
2935 * victim and we want to be sure we catch the
2936 * right culprit. Then every time we do kick
2937 * the ring, add a small increment to the
2938 * score so that we can catch a batch that is
2939 * being repeatedly kicked and so responsible
2940 * for stalling the machine.
2941 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002942 ring->hangcheck.action = ring_stuck(ring,
2943 acthd);
2944
2945 switch (ring->hangcheck.action) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002946 case HANGCHECK_IDLE:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002947 case HANGCHECK_WAIT:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002948 case HANGCHECK_ACTIVE:
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03002949 break;
2950 case HANGCHECK_ACTIVE_LOOP:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002951 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01002952 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002953 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002954 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002955 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002956 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002957 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002958 stuck[i] = true;
2959 break;
2960 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002961 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002962 } else {
Mika Kuoppalada661462013-09-06 16:03:28 +03002963 ring->hangcheck.action = HANGCHECK_ACTIVE;
2964
Chris Wilson9107e9d2013-06-10 11:20:20 +01002965 /* Gradually reduce the count so that we catch DoS
2966 * attempts across multiple batches.
2967 */
2968 if (ring->hangcheck.score > 0)
2969 ring->hangcheck.score--;
Mika Kuoppalaf260fe72014-08-05 17:16:26 +03002970
2971 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002972 }
2973
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002974 ring->hangcheck.seqno = seqno;
2975 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002976 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002977 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002978
Mika Kuoppala92cab732013-05-24 17:16:07 +03002979 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002980 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
Daniel Vetterb8d88d12013-08-28 10:57:59 +02002981 DRM_INFO("%s on %s\n",
2982 stuck[i] ? "stuck" : "no progress",
2983 ring->name);
Chris Wilsona43adf02013-06-10 11:20:22 +01002984 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002985 }
2986 }
2987
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002988 if (rings_hung)
Mika Kuoppala58174462014-02-25 17:11:26 +02002989 return i915_handle_error(dev, true, "Ring hung");
Ben Gamarif65d9422009-09-14 17:48:44 -04002990
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002991 if (busy_count)
 2992		/* Reset the timer in case the chip hangs without another request
 2993		 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002994 i915_queue_hangcheck(dev);
2995}
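/*
 * A worked example of the scoring above: with no seqno/acthd progress a
 * hung ring gains HUNG (20) per hangcheck tick and a kicked ring KICK (5),
 * so either crosses HANGCHECK_SCORE_RING_HUNG within a few ticks, while a
 * ring that resumes making progress decays one point per tick back toward
 * zero.
 */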
2996
2997void i915_queue_hangcheck(struct drm_device *dev)
2998{
2999 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulad330a952014-01-21 11:24:25 +02003000 if (!i915.enable_hangcheck)
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03003001 return;
3002
3003 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3004 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04003005}
3006
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03003007static void ibx_irq_reset(struct drm_device *dev)
Paulo Zanoni91738a92013-06-05 14:21:51 -03003008{
3009 struct drm_i915_private *dev_priv = dev->dev_private;
3010
3011 if (HAS_PCH_NOP(dev))
3012 return;
3013
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003014 GEN5_IRQ_RESET(SDE);
Paulo Zanoni105b1222014-04-01 15:37:17 -03003015
3016 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3017 I915_WRITE(SERR_INT, 0xffffffff);
Paulo Zanoni622364b2014-04-01 15:37:22 -03003018}
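/*
 * A sketch of the shape of the GEN5_IRQ_RESET()-style helpers used
 * throughout this block (the real macros live earlier in this file):
 * mask everything, zero the enable register, and clear IIR twice since
 * it is double-buffered. The register parameters here are illustrative.
 */
static void example_irq_block_reset(struct drm_i915_private *dev_priv,
				    u32 imr, u32 ier, u32 iir)
{
	I915_WRITE(imr, 0xffffffff);
	POSTING_READ(imr);
	I915_WRITE(ier, 0);
	I915_WRITE(iir, 0xffffffff);
	POSTING_READ(iir);
	I915_WRITE(iir, 0xffffffff);	/* second clear flushes the latch */
	POSTING_READ(iir);
}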
Paulo Zanoni105b1222014-04-01 15:37:17 -03003019
Paulo Zanoni622364b2014-04-01 15:37:22 -03003020/*
3021 * SDEIER is also touched by the interrupt handler to work around missed PCH
3022 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3023 * instead we unconditionally enable all PCH interrupt sources here, but then
3024 * only unmask them as needed with SDEIMR.
3025 *
3026 * This function needs to be called before interrupts are enabled.
3027 */
3028static void ibx_irq_pre_postinstall(struct drm_device *dev)
3029{
3030 struct drm_i915_private *dev_priv = dev->dev_private;
3031
3032 if (HAS_PCH_NOP(dev))
3033 return;
3034
3035 WARN_ON(I915_READ(SDEIER) != 0);
Paulo Zanoni91738a92013-06-05 14:21:51 -03003036 I915_WRITE(SDEIER, 0xffffffff);
3037 POSTING_READ(SDEIER);
3038}
3039
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03003040static void gen5_gt_irq_reset(struct drm_device *dev)
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02003041{
3042 struct drm_i915_private *dev_priv = dev->dev_private;
3043
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003044 GEN5_IRQ_RESET(GT);
Paulo Zanonia9d356a2014-04-01 15:37:09 -03003045 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003046 GEN5_IRQ_RESET(GEN6_PM);
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02003047}
3048
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049/* drm_dma.h hooks */
Paulo Zanonibe30b292014-04-01 15:37:25 -03003051static void ironlake_irq_reset(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003052{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003053 struct drm_i915_private *dev_priv = dev->dev_private;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003054
Paulo Zanoni0c841212014-04-01 15:37:27 -03003055 I915_WRITE(HWSTAM, 0xffffffff);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01003056
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003057 GEN5_IRQ_RESET(DE);
Paulo Zanonic6d954c2014-04-01 15:37:18 -03003058 if (IS_GEN7(dev))
3059 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003060
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03003061 gen5_gt_irq_reset(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00003062
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03003063 ibx_irq_reset(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07003064}
3065
Ville Syrjälä70591a42014-10-30 19:42:58 +02003066static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3067{
3068 enum pipe pipe;
3069
3070 I915_WRITE(PORT_HOTPLUG_EN, 0);
3071 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3072
3073 for_each_pipe(dev_priv, pipe)
3074 I915_WRITE(PIPESTAT(pipe), 0xffff);
3075
3076 GEN5_IRQ_RESET(VLV_);
3077}
3078
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003079static void valleyview_irq_preinstall(struct drm_device *dev)
3080{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003081 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003082
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003083 /* VLV magic */
3084 I915_WRITE(VLV_IMR, 0);
3085 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3086 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3087 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3088
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03003089 gen5_gt_irq_reset(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003090
Ville Syrjälä7c4cde32014-10-30 19:42:51 +02003091 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003092
Ville Syrjälä70591a42014-10-30 19:42:58 +02003093 vlv_display_irq_reset(dev_priv);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003094}
3095
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02003096static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3097{
3098 GEN8_IRQ_RESET_NDX(GT, 0);
3099 GEN8_IRQ_RESET_NDX(GT, 1);
3100 GEN8_IRQ_RESET_NDX(GT, 2);
3101 GEN8_IRQ_RESET_NDX(GT, 3);
3102}
3103
Paulo Zanoni823f6b32014-04-01 15:37:26 -03003104static void gen8_irq_reset(struct drm_device *dev)
Ben Widawskyabd58f02013-11-02 21:07:09 -07003105{
3106 struct drm_i915_private *dev_priv = dev->dev_private;
3107 int pipe;
3108
Ben Widawskyabd58f02013-11-02 21:07:09 -07003109 I915_WRITE(GEN8_MASTER_IRQ, 0);
3110 POSTING_READ(GEN8_MASTER_IRQ);
3111
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02003112 gen8_gt_irq_reset(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003113
Damien Lespiau055e3932014-08-18 13:49:10 +01003114 for_each_pipe(dev_priv, pipe)
Daniel Vetterf458ebb2014-09-30 10:56:39 +02003115 if (intel_display_power_is_enabled(dev_priv,
3116 POWER_DOMAIN_PIPE(pipe)))
Paulo Zanoni813bde42014-07-04 11:50:29 -03003117 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003118
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003119 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3120 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3121 GEN5_IRQ_RESET(GEN8_PCU_);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003122
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03003123 ibx_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003124}
Ben Widawskyabd58f02013-11-02 21:07:09 -07003125
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003126void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3127{
Paulo Zanoni1180e202014-10-07 18:02:52 -03003128 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003129
Daniel Vetter13321782014-09-15 14:55:29 +02003130 spin_lock_irq(&dev_priv->irq_lock);
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003131 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
Paulo Zanoni1180e202014-10-07 18:02:52 -03003132 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003133 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
Paulo Zanoni1180e202014-10-07 18:02:52 -03003134 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
Daniel Vetter13321782014-09-15 14:55:29 +02003135 spin_unlock_irq(&dev_priv->irq_lock);
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003136}
3137
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003138static void cherryview_irq_preinstall(struct drm_device *dev)
3139{
3140 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003141
3142 I915_WRITE(GEN8_MASTER_IRQ, 0);
3143 POSTING_READ(GEN8_MASTER_IRQ);
3144
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02003145 gen8_gt_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003146
3147 GEN5_IRQ_RESET(GEN8_PCU_);
3148
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003149 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3150
Ville Syrjälä70591a42014-10-30 19:42:58 +02003151 vlv_display_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003152}
3153
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003154static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07003155{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003156 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003157 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02003158 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07003159
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003160 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02003161 hotplug_irqs = SDE_HOTPLUG_MASK;
Damien Lespiaub2784e12014-08-05 11:29:37 +01003162 for_each_intel_encoder(dev, intel_encoder)
Egbert Eichcd569ae2013-04-16 13:36:57 +02003163 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02003164 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003165 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02003166 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Damien Lespiaub2784e12014-08-05 11:29:37 +01003167 for_each_intel_encoder(dev, intel_encoder)
Egbert Eichcd569ae2013-04-16 13:36:57 +02003168 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02003169 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003170 }
3171
Daniel Vetterfee884e2013-07-04 23:35:21 +02003172 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003173
3174 /*
3175 * Enable digital hotplug on the PCH, and configure the DP short pulse
3176 * duration to 2ms (which is the minimum in the Display Port spec)
3177 *
3178 * This register is the same on all known PCH chips.
3179 */
Keith Packard7fe0b972011-09-19 13:31:02 -07003180 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3181 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3182 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3183 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3184 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3185 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3186}
3187
Paulo Zanonid46da432013-02-08 17:35:15 -02003188static void ibx_irq_postinstall(struct drm_device *dev)
3189{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003190 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003191 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02003192
Daniel Vetter692a04c2013-05-29 21:43:05 +02003193 if (HAS_PCH_NOP(dev))
3194 return;
3195
Paulo Zanoni105b1222014-04-01 15:37:17 -03003196 if (HAS_PCH_IBX(dev))
Daniel Vetter5c673b62014-03-07 20:34:46 +01003197 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
Paulo Zanoni105b1222014-04-01 15:37:17 -03003198 else
Daniel Vetter5c673b62014-03-07 20:34:46 +01003199 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
Paulo Zanoni86642812013-04-12 17:57:57 -03003200
Paulo Zanoni337ba012014-04-01 15:37:16 -03003201 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
Paulo Zanonid46da432013-02-08 17:35:15 -02003202 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02003203}
3204
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003205static void gen5_gt_irq_postinstall(struct drm_device *dev)
3206{
3207 struct drm_i915_private *dev_priv = dev->dev_private;
3208 u32 pm_irqs, gt_irqs;
3209
3210 pm_irqs = gt_irqs = 0;
3211
3212 dev_priv->gt_irq_mask = ~0;
Ben Widawsky040d2ba2013-09-19 11:01:40 -07003213 if (HAS_L3_DPF(dev)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003214 /* L3 parity interrupt is always unmasked. */
Ben Widawsky35a85ac2013-09-19 11:13:41 -07003215 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3216 gt_irqs |= GT_PARITY_ERROR(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003217 }
3218
3219 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3220 if (IS_GEN5(dev)) {
3221 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3222 ILK_BSD_USER_INTERRUPT;
3223 } else {
3224 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3225 }
3226
Paulo Zanoni35079892014-04-01 15:37:15 -03003227 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003228
3229 if (INTEL_INFO(dev)->gen >= 6) {
Deepak Sa6706b42014-03-15 20:23:22 +05303230 pm_irqs |= dev_priv->pm_rps_events;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003231
3232 if (HAS_VEBOX(dev))
3233 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3234
Paulo Zanoni605cd252013-08-06 18:57:15 -03003235 dev_priv->pm_irq_mask = 0xffffffff;
Paulo Zanoni35079892014-04-01 15:37:15 -03003236 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003237 }
3238}
3239
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003240static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003241{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003242 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003243 u32 display_mask, extra_mask;
3244
3245 if (INTEL_INFO(dev)->gen >= 7) {
3246 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3247 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3248 DE_PLANEB_FLIP_DONE_IVB |
Daniel Vetter5c673b62014-03-07 20:34:46 +01003249 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003250 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
Daniel Vetter5c673b62014-03-07 20:34:46 +01003251 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003252 } else {
3253 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3254 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003255 DE_AUX_CHANNEL_A |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003256 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3257 DE_POISON);
Daniel Vetter5c673b62014-03-07 20:34:46 +01003258 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3259 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003260 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003261
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003262 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003263
Paulo Zanoni0c841212014-04-01 15:37:27 -03003264 I915_WRITE(HWSTAM, 0xeffe);
3265
Paulo Zanoni622364b2014-04-01 15:37:22 -03003266 ibx_irq_pre_postinstall(dev);
3267
Paulo Zanoni35079892014-04-01 15:37:15 -03003268 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003269
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003270 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003271
Paulo Zanonid46da432013-02-08 17:35:15 -02003272 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07003273
Jesse Barnesf97108d2010-01-29 11:27:07 -08003274 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02003275 /* Enable PCU event interrupts
3276 *
3277 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003278 * setup is guaranteed to run in single-threaded context. But we
3279 * need it to make the assert_spin_locked happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003280 spin_lock_irq(&dev_priv->irq_lock);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003281 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetterd6207432014-09-15 14:55:27 +02003282 spin_unlock_irq(&dev_priv->irq_lock);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003283 }
3284
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003285 return 0;
3286}
3287
Imre Deakf8b79e52014-03-04 19:23:07 +02003288static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3289{
3290 u32 pipestat_mask;
3291 u32 iir_mask;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003292 enum pipe pipe;
Imre Deakf8b79e52014-03-04 19:23:07 +02003293
3294 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3295 PIPE_FIFO_UNDERRUN_STATUS;
3296
Ville Syrjälä120dda42014-10-30 19:42:57 +02003297 for_each_pipe(dev_priv, pipe)
3298 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003299 POSTING_READ(PIPESTAT(PIPE_A));
3300
3301 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3302 PIPE_CRC_DONE_INTERRUPT_STATUS;
3303
Ville Syrjälä120dda42014-10-30 19:42:57 +02003304 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3305 for_each_pipe(dev_priv, pipe)
3306 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003307
3308 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3309 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3310 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003311 if (IS_CHERRYVIEW(dev_priv))
3312 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
Imre Deakf8b79e52014-03-04 19:23:07 +02003313 dev_priv->irq_mask &= ~iir_mask;
3314
3315 I915_WRITE(VLV_IIR, iir_mask);
3316 I915_WRITE(VLV_IIR, iir_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003317 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003318 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3319 POSTING_READ(VLV_IMR);
Imre Deakf8b79e52014-03-04 19:23:07 +02003320}
3321
3322static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3323{
3324 u32 pipestat_mask;
3325 u32 iir_mask;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003326 enum pipe pipe;
Imre Deakf8b79e52014-03-04 19:23:07 +02003327
3328 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3329 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
Imre Deak6c7fba02014-03-10 19:44:48 +02003330 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003331 if (IS_CHERRYVIEW(dev_priv))
3332 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
Imre Deakf8b79e52014-03-04 19:23:07 +02003333
3334 dev_priv->irq_mask |= iir_mask;
Imre Deakf8b79e52014-03-04 19:23:07 +02003335 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003336 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003337 I915_WRITE(VLV_IIR, iir_mask);
3338 I915_WRITE(VLV_IIR, iir_mask);
3339 POSTING_READ(VLV_IIR);
3340
3341 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3342 PIPE_CRC_DONE_INTERRUPT_STATUS;
3343
Ville Syrjälä120dda42014-10-30 19:42:57 +02003344 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3345 for_each_pipe(dev_priv, pipe)
3346 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003347
3348 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3349 PIPE_FIFO_UNDERRUN_STATUS;
Ville Syrjälä120dda42014-10-30 19:42:57 +02003350
3351 for_each_pipe(dev_priv, pipe)
3352 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003353 POSTING_READ(PIPESTAT(PIPE_A));
3354}
3355
3356void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3357{
3358 assert_spin_locked(&dev_priv->irq_lock);
3359
3360 if (dev_priv->display_irqs_enabled)
3361 return;
3362
3363 dev_priv->display_irqs_enabled = true;
3364
Imre Deak950eaba2014-09-08 15:21:09 +03003365 if (intel_irqs_enabled(dev_priv))
Imre Deakf8b79e52014-03-04 19:23:07 +02003366 valleyview_display_irqs_install(dev_priv);
3367}
3368
3369void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3370{
3371 assert_spin_locked(&dev_priv->irq_lock);
3372
3373 if (!dev_priv->display_irqs_enabled)
3374 return;
3375
3376 dev_priv->display_irqs_enabled = false;
3377
Imre Deak950eaba2014-09-08 15:21:09 +03003378 if (intel_irqs_enabled(dev_priv))
Imre Deakf8b79e52014-03-04 19:23:07 +02003379 valleyview_display_irqs_uninstall(dev_priv);
3380}
3381
Ville Syrjälä0e6c9a92014-10-30 19:43:00 +02003382static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003383{
Imre Deakf8b79e52014-03-04 19:23:07 +02003384 dev_priv->irq_mask = ~0;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003385
Daniel Vetter20afbda2012-12-11 14:05:07 +01003386 I915_WRITE(PORT_HOTPLUG_EN, 0);
3387 POSTING_READ(PORT_HOTPLUG_EN);
3388
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003389 I915_WRITE(VLV_IIR, 0xffffffff);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003390 I915_WRITE(VLV_IIR, 0xffffffff);
3391 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3392 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3393 POSTING_READ(VLV_IMR);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003394
Daniel Vetterb79480b2013-06-27 17:52:10 +02003395	/* Interrupt setup is already guaranteed to be single-threaded; this is
3396 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003397 spin_lock_irq(&dev_priv->irq_lock);
Imre Deakf8b79e52014-03-04 19:23:07 +02003398 if (dev_priv->display_irqs_enabled)
3399 valleyview_display_irqs_install(dev_priv);
Daniel Vetterd6207432014-09-15 14:55:27 +02003400 spin_unlock_irq(&dev_priv->irq_lock);
Ville Syrjälä0e6c9a92014-10-30 19:43:00 +02003401}
3402
3403static int valleyview_irq_postinstall(struct drm_device *dev)
3404{
3405 struct drm_i915_private *dev_priv = dev->dev_private;
3406
3407 vlv_display_irq_postinstall(dev_priv);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003408
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003409 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003410
3411 /* ack & enable invalid PTE error interrupts */
3412#if 0 /* FIXME: add support to irq handler for checking these bits */
3413 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3414 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3415#endif
3416
3417 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003418
3419 return 0;
3420}
3421
Ben Widawskyabd58f02013-11-02 21:07:09 -07003422static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3423{
Ben Widawskyabd58f02013-11-02 21:07:09 -07003424 /* These are interrupts we'll toggle with the ring mask register */
3425 uint32_t gt_interrupts[] = {
3426 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003427 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
Ben Widawskyabd58f02013-11-02 21:07:09 -07003428 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003429 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3430 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
Ben Widawskyabd58f02013-11-02 21:07:09 -07003431 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
Oscar Mateo73d477f2014-07-24 17:04:31 +01003432 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3433 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3434 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
Ben Widawskyabd58f02013-11-02 21:07:09 -07003435 0,
Oscar Mateo73d477f2014-07-24 17:04:31 +01003436 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3437 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
Ben Widawskyabd58f02013-11-02 21:07:09 -07003438 };
3439
Ben Widawsky09610212014-05-15 20:58:08 +03003440 dev_priv->pm_irq_mask = 0xffffffff;
Deepak S9a2d2d82014-08-22 08:32:40 +05303441 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3442 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3443 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3444 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003445}
3446
3447static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3448{
Damien Lespiau770de832014-03-20 20:45:01 +00003449 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3450 uint32_t de_pipe_enables;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003451 int pipe;
Damien Lespiau770de832014-03-20 20:45:01 +00003452
3453 if (IS_GEN9(dev_priv))
3454 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3455 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3456 else
3457 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3458 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3459
3460 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3461 GEN8_PIPE_FIFO_UNDERRUN;
3462
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003463 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3464 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3465 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003466
Damien Lespiau055e3932014-08-18 13:49:10 +01003467 for_each_pipe(dev_priv, pipe)
Daniel Vetterf458ebb2014-09-30 10:56:39 +02003468 if (intel_display_power_is_enabled(dev_priv,
Paulo Zanoni813bde42014-07-04 11:50:29 -03003469 POWER_DOMAIN_PIPE(pipe)))
3470 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3471 dev_priv->de_irq_mask[pipe],
3472 de_pipe_enables);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003473
Paulo Zanoni35079892014-04-01 15:37:15 -03003474 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003475}
3476
3477static int gen8_irq_postinstall(struct drm_device *dev)
3478{
3479 struct drm_i915_private *dev_priv = dev->dev_private;
3480
Paulo Zanoni622364b2014-04-01 15:37:22 -03003481 ibx_irq_pre_postinstall(dev);
3482
Ben Widawskyabd58f02013-11-02 21:07:09 -07003483 gen8_gt_irq_postinstall(dev_priv);
3484 gen8_de_irq_postinstall(dev_priv);
3485
3486 ibx_irq_postinstall(dev);
3487
3488 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3489 POSTING_READ(GEN8_MASTER_IRQ);
3490
3491 return 0;
3492}
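/*
 * Same pattern as the other postinstall hooks: the PCH, GT and DE sources
 * are armed first, and the single DE_MASTER_IRQ_CONTROL write to
 * GEN8_MASTER_IRQ comes last, with a POSTING_READ to flush the enable out to
 * hardware before anyone relies on interrupt delivery.
 */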
3493
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003494static int cherryview_irq_postinstall(struct drm_device *dev)
3495{
3496 struct drm_i915_private *dev_priv = dev->dev_private;
3497 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3498 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003499 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Ville Syrjälä3278f672014-04-09 13:28:49 +03003500 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3501 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3502 PIPE_CRC_DONE_INTERRUPT_STATUS;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003503 int pipe;
3504
3505 /*
3506 * Leave vblank interrupts masked initially. enable/disable will
3507 * toggle them based on usage.
3508 */
Ville Syrjälä3278f672014-04-09 13:28:49 +03003509 dev_priv->irq_mask = ~enable_mask;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003510
Damien Lespiau055e3932014-08-18 13:49:10 +01003511 for_each_pipe(dev_priv, pipe)
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003512 I915_WRITE(PIPESTAT(pipe), 0xffff);
3513
Daniel Vetterd6207432014-09-15 14:55:27 +02003514 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä3278f672014-04-09 13:28:49 +03003515 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
Damien Lespiau055e3932014-08-18 13:49:10 +01003516 for_each_pipe(dev_priv, pipe)
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003517 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
Daniel Vetterd6207432014-09-15 14:55:27 +02003518 spin_unlock_irq(&dev_priv->irq_lock);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003519
3520 I915_WRITE(VLV_IIR, 0xffffffff);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003521 I915_WRITE(VLV_IIR, 0xffffffff);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003522 I915_WRITE(VLV_IER, enable_mask);
Ville Syrjälä76e41862014-10-30 19:42:54 +02003523 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3524 POSTING_READ(VLV_IMR);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003525
3526 gen8_gt_irq_postinstall(dev_priv);
3527
3528 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3529 POSTING_READ(GEN8_MASTER_IRQ);
3530
3531 return 0;
3532}
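/*
 * Cherryview is a hybrid here: the display side is programmed through the
 * VLV-style PIPESTAT/VLV_IER registers above, while the GT side reuses
 * gen8_gt_irq_postinstall() and the GEN8_MASTER_IRQ master enable.
 */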
3533
Ben Widawskyabd58f02013-11-02 21:07:09 -07003534static void gen8_irq_uninstall(struct drm_device *dev)
3535{
3536 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003537
3538 if (!dev_priv)
3539 return;
3540
Paulo Zanoni823f6b32014-04-01 15:37:26 -03003541 gen8_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003542}
3543
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003544static void valleyview_irq_uninstall(struct drm_device *dev)
3545{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003546 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003547
3548 if (!dev_priv)
3549 return;
3550
Imre Deak843d0e72014-04-14 20:24:23 +03003551 I915_WRITE(VLV_MASTER_IER, 0);
3552
Ville Syrjälä893fce82014-10-30 19:42:56 +02003553 gen5_gt_irq_reset(dev);
3554
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003555 I915_WRITE(HWSTAM, 0xffffffff);
Imre Deakf8b79e52014-03-04 19:23:07 +02003556
Daniel Vetterd6207432014-09-15 14:55:27 +02003557 /* Interrupt setup is already guaranteed to be single-threaded, this is
3558 * just to make the assert_spin_locked check happy. */
3559 spin_lock_irq(&dev_priv->irq_lock);
Imre Deakf8b79e52014-03-04 19:23:07 +02003560 if (dev_priv->display_irqs_enabled)
3561 valleyview_display_irqs_uninstall(dev_priv);
Daniel Vetterd6207432014-09-15 14:55:27 +02003562 spin_unlock_irq(&dev_priv->irq_lock);
Imre Deakf8b79e52014-03-04 19:23:07 +02003563
Ville Syrjälä70591a42014-10-30 19:42:58 +02003564 vlv_display_irq_reset(dev_priv);
Imre Deakf8b79e52014-03-04 19:23:07 +02003565
Ville Syrjälä70591a42014-10-30 19:42:58 +02003566 dev_priv->irq_mask = 0;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003567}
3568
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003569static void cherryview_irq_uninstall(struct drm_device *dev)
3570{
3571 struct drm_i915_private *dev_priv = dev->dev_private;
3572 int pipe;
3573
3574 if (!dev_priv)
3575 return;
3576
3577 I915_WRITE(GEN8_MASTER_IRQ, 0);
3578 POSTING_READ(GEN8_MASTER_IRQ);
3579
Ville Syrjäläa2c30fb2014-10-30 19:42:52 +02003580 gen8_gt_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003581
Ville Syrjäläa2c30fb2014-10-30 19:42:52 +02003582 GEN5_IRQ_RESET(GEN8_PCU_);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003583
3584 I915_WRITE(PORT_HOTPLUG_EN, 0);
3585 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3586
Damien Lespiau055e3932014-08-18 13:49:10 +01003587 for_each_pipe(dev_priv, pipe)
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003588 I915_WRITE(PIPESTAT(pipe), 0xffff);
3589
Ville Syrjälä23a09c72014-10-30 19:42:55 +02003590 GEN5_IRQ_RESET(VLV_);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003591}
3592
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003593static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003594{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003595 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07003596
3597 if (!dev_priv)
3598 return;
3599
Paulo Zanonibe30b292014-04-01 15:37:25 -03003600 ironlake_irq_reset(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003601}
3602
Chris Wilsonc2798b12012-04-22 21:13:57 +01003603static void i8xx_irq_preinstall(struct drm_device * dev)
3604{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003605 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003606 int pipe;
3607
Damien Lespiau055e3932014-08-18 13:49:10 +01003608 for_each_pipe(dev_priv, pipe)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003609 I915_WRITE(PIPESTAT(pipe), 0);
3610 I915_WRITE16(IMR, 0xffff);
3611 I915_WRITE16(IER, 0x0);
3612 POSTING_READ16(IER);
3613}
3614
3615static int i8xx_irq_postinstall(struct drm_device *dev)
3616{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003617 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003618
Chris Wilsonc2798b12012-04-22 21:13:57 +01003619 I915_WRITE16(EMR,
3620 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3621
3622 /* Unmask the interrupts that we always want on. */
3623 dev_priv->irq_mask =
3624 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3625 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3626 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3627 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3628 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3629 I915_WRITE16(IMR, dev_priv->irq_mask);
3630
3631 I915_WRITE16(IER,
3632 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3633 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3634 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3635 I915_USER_INTERRUPT);
3636 POSTING_READ16(IER);
3637
Daniel Vetter379ef822013-10-16 22:55:56 +02003638 /* Interrupt setup is already guaranteed to be single-threaded, this is
3639 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003640 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003641 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3642 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02003643 spin_unlock_irq(&dev_priv->irq_lock);
Daniel Vetter379ef822013-10-16 22:55:56 +02003644
Chris Wilsonc2798b12012-04-22 21:13:57 +01003645 return 0;
3646}
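/*
 * A rough sketch of how the three registers written above fit together,
 * inferred from their usage in this file rather than from chipset docs: EMR
 * selects which error conditions feed the master error bit, IMR controls
 * which events are reported in IIR at all, and IER decides which of those
 * actually raise a CPU interrupt.
 */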
3647
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003648/*
3649 * Returns true when a page flip has completed.
3650 */
3651static bool i8xx_handle_vblank(struct drm_device *dev,
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003652 int plane, int pipe, u32 iir)
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003653{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003654 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003655 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003656
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03003657 if (!intel_pipe_handle_vblank(dev, pipe))
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003658 return false;
3659
3660 if ((iir & flip_pending) == 0)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003661 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003662
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003663 intel_prepare_page_flip(dev, plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003664
3665 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3666	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3667 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3668 * the flip is completed (no longer pending). Since this doesn't raise
3669 * an interrupt per se, we watch for the change at vblank.
3670 */
3671 if (I915_READ16(ISR) & flip_pending)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003672 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003673
3674 intel_finish_page_flip(dev, pipe);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003675 return true;
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003676
3677check_page_flip:
3678 intel_check_page_flip(dev, pipe);
3679 return false;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003680}
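/*
 * If the PendingFlip transition above is never observed we drop through to
 * intel_check_page_flip(), which (going by its use here) re-checks whether a
 * flip completion was missed so that a lost status update cannot leave a
 * page flip stuck forever.
 */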
3681
Daniel Vetterff1f5252012-10-02 15:10:55 +02003682static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003683{
Daniel Vetter45a83f82014-05-12 19:17:55 +02003684 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003685 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003686 u16 iir, new_iir;
3687 u32 pipe_stats[2];
Chris Wilsonc2798b12012-04-22 21:13:57 +01003688 int pipe;
3689 u16 flip_mask =
3690 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3691 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3692
Chris Wilsonc2798b12012-04-22 21:13:57 +01003693 iir = I915_READ16(IIR);
3694 if (iir == 0)
3695 return IRQ_NONE;
3696
3697 while (iir & ~flip_mask) {
3698 /* Can't rely on pipestat interrupt bit in iir as it might
3699 * have been cleared after the pipestat interrupt was received.
3700 * It doesn't set the bit in iir again, but it still produces
3701 * interrupts (for non-MSI).
3702 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02003703 spin_lock(&dev_priv->irq_lock);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003704 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02003705 i915_handle_error(dev, false,
3706 "Command parser error, iir 0x%08x",
3707 iir);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003708
Damien Lespiau055e3932014-08-18 13:49:10 +01003709 for_each_pipe(dev_priv, pipe) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003710 int reg = PIPESTAT(pipe);
3711 pipe_stats[pipe] = I915_READ(reg);
3712
3713 /*
3714 * Clear the PIPE*STAT regs before the IIR
3715 */
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003716 if (pipe_stats[pipe] & 0x8000ffff)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003717 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003718 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02003719 spin_unlock(&dev_priv->irq_lock);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003720
3721 I915_WRITE16(IIR, iir & ~flip_mask);
3722 new_iir = I915_READ16(IIR); /* Flush posted writes */
3723
Daniel Vetterd05c6172012-04-26 23:28:09 +02003724 i915_update_dri1_breadcrumb(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003725
3726 if (iir & I915_USER_INTERRUPT)
3727 notify_ring(dev, &dev_priv->ring[RCS]);
3728
Damien Lespiau055e3932014-08-18 13:49:10 +01003729 for_each_pipe(dev_priv, pipe) {
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003730 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003731 if (HAS_FBC(dev))
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003732 plane = !plane;
3733
Daniel Vetter4356d582013-10-16 22:55:55 +02003734 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003735 i8xx_handle_vblank(dev, plane, pipe, iir))
3736 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003737
Daniel Vetter4356d582013-10-16 22:55:55 +02003738 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003739 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003740
Daniel Vetter1f7247c2014-09-30 10:56:48 +02003741 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3742 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3743 pipe);
Daniel Vetter4356d582013-10-16 22:55:55 +02003744 }
Chris Wilsonc2798b12012-04-22 21:13:57 +01003745
3746 iir = new_iir;
3747 }
3748
3749 return IRQ_HANDLED;
3750}
3751
3752static void i8xx_irq_uninstall(struct drm_device * dev)
3753{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003754 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003755 int pipe;
3756
Damien Lespiau055e3932014-08-18 13:49:10 +01003757 for_each_pipe(dev_priv, pipe) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003758 /* Clear enable bits; then clear status bits */
3759 I915_WRITE(PIPESTAT(pipe), 0);
3760 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3761 }
3762 I915_WRITE16(IMR, 0xffff);
3763 I915_WRITE16(IER, 0x0);
3764 I915_WRITE16(IIR, I915_READ16(IIR));
3765}
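/*
 * The double PIPESTAT write above follows the usual split of that register
 * (an assumption worth stating: enable bits in the high half, write-1-to-
 * clear status bits in the low half, which is also why the handlers test
 * against 0x8000ffff): the first write drops the enables, the second writes
 * the latched status bits back to themselves to clear them.
 */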
3766
Chris Wilsona266c7d2012-04-24 22:59:44 +01003767static void i915_irq_preinstall(struct drm_device * dev)
3768{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003769 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003770 int pipe;
3771
Chris Wilsona266c7d2012-04-24 22:59:44 +01003772 if (I915_HAS_HOTPLUG(dev)) {
3773 I915_WRITE(PORT_HOTPLUG_EN, 0);
3774 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3775 }
3776
Chris Wilson00d98eb2012-04-24 22:59:48 +01003777 I915_WRITE16(HWSTAM, 0xeffe);
Damien Lespiau055e3932014-08-18 13:49:10 +01003778 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003779 I915_WRITE(PIPESTAT(pipe), 0);
3780 I915_WRITE(IMR, 0xffffffff);
3781 I915_WRITE(IER, 0x0);
3782 POSTING_READ(IER);
3783}
3784
3785static int i915_irq_postinstall(struct drm_device *dev)
3786{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003787 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01003788 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003789
Chris Wilson38bde182012-04-24 22:59:50 +01003790 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3791
3792 /* Unmask the interrupts that we always want on. */
3793 dev_priv->irq_mask =
3794 ~(I915_ASLE_INTERRUPT |
3795 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3796 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3797 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3798 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3799 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3800
3801 enable_mask =
3802 I915_ASLE_INTERRUPT |
3803 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3804 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3805 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3806 I915_USER_INTERRUPT;
3807
Chris Wilsona266c7d2012-04-24 22:59:44 +01003808 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01003809 I915_WRITE(PORT_HOTPLUG_EN, 0);
3810 POSTING_READ(PORT_HOTPLUG_EN);
3811
Chris Wilsona266c7d2012-04-24 22:59:44 +01003812 /* Enable in IER... */
3813 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3814 /* and unmask in IMR */
3815 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3816 }
3817
Chris Wilsona266c7d2012-04-24 22:59:44 +01003818 I915_WRITE(IMR, dev_priv->irq_mask);
3819 I915_WRITE(IER, enable_mask);
3820 POSTING_READ(IER);
3821
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003822 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003823
Daniel Vetter379ef822013-10-16 22:55:56 +02003824 /* Interrupt setup is already guaranteed to be single-threaded, this is
3825 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02003826 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02003827 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3828 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02003829 spin_unlock_irq(&dev_priv->irq_lock);
Daniel Vetter379ef822013-10-16 22:55:56 +02003830
Daniel Vetter20afbda2012-12-11 14:05:07 +01003831 return 0;
3832}
3833
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003834/*
3835 * Returns true when a page flip has completed.
3836 */
3837static bool i915_handle_vblank(struct drm_device *dev,
3838 int plane, int pipe, u32 iir)
3839{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003840 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003841 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3842
Ville Syrjälä8d7849d2014-04-29 13:35:46 +03003843 if (!intel_pipe_handle_vblank(dev, pipe))
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003844 return false;
3845
3846 if ((iir & flip_pending) == 0)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003847 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003848
3849 intel_prepare_page_flip(dev, plane);
3850
3851 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3852	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3853 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3854 * the flip is completed (no longer pending). Since this doesn't raise
3855 * an interrupt per se, we watch for the change at vblank.
3856 */
3857 if (I915_READ(ISR) & flip_pending)
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003858 goto check_page_flip;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003859
3860 intel_finish_page_flip(dev, pipe);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003861 return true;
Chris Wilsond6bbafa2014-09-05 07:13:24 +01003862
3863check_page_flip:
3864 intel_check_page_flip(dev, pipe);
3865 return false;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003866}
3867
Daniel Vetterff1f5252012-10-02 15:10:55 +02003868static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003869{
Daniel Vetter45a83f82014-05-12 19:17:55 +02003870 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003871 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01003872 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilson38bde182012-04-24 22:59:50 +01003873 u32 flip_mask =
3874 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3875 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01003876 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003877
Chris Wilsona266c7d2012-04-24 22:59:44 +01003878 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01003879 do {
3880 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01003881 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003882
3883 /* Can't rely on pipestat interrupt bit in iir as it might
3884 * have been cleared after the pipestat interrupt was received.
3885 * It doesn't set the bit in iir again, but it still produces
3886 * interrupts (for non-MSI).
3887 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02003888 spin_lock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003889 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02003890 i915_handle_error(dev, false,
3891 "Command parser error, iir 0x%08x",
3892 iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003893
Damien Lespiau055e3932014-08-18 13:49:10 +01003894 for_each_pipe(dev_priv, pipe) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003895 int reg = PIPESTAT(pipe);
3896 pipe_stats[pipe] = I915_READ(reg);
3897
Chris Wilson38bde182012-04-24 22:59:50 +01003898 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003899 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003900 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01003901 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003902 }
3903 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02003904 spin_unlock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003905
3906 if (!irq_received)
3907 break;
3908
Chris Wilsona266c7d2012-04-24 22:59:44 +01003909 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä16c6c562014-04-01 10:54:36 +03003910 if (I915_HAS_HOTPLUG(dev) &&
3911 iir & I915_DISPLAY_PORT_INTERRUPT)
3912 i9xx_hpd_irq_handler(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003913
Chris Wilson38bde182012-04-24 22:59:50 +01003914 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003915 new_iir = I915_READ(IIR); /* Flush posted writes */
3916
Chris Wilsona266c7d2012-04-24 22:59:44 +01003917 if (iir & I915_USER_INTERRUPT)
3918 notify_ring(dev, &dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003919
Damien Lespiau055e3932014-08-18 13:49:10 +01003920 for_each_pipe(dev_priv, pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01003921 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003922 if (HAS_FBC(dev))
Chris Wilson38bde182012-04-24 22:59:50 +01003923 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02003924
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003925 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3926 i915_handle_vblank(dev, plane, pipe, iir))
3927 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003928
3929 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3930 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003931
3932 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003933 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003934
Daniel Vetter1f7247c2014-09-30 10:56:48 +02003935 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3936 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3937 pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003938 }
3939
Chris Wilsona266c7d2012-04-24 22:59:44 +01003940 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3941 intel_opregion_asle_intr(dev);
3942
3943 /* With MSI, interrupts are only generated when iir
3944 * transitions from zero to nonzero. If another bit got
3945 * set while we were handling the existing iir bits, then
3946 * we would never get another interrupt.
3947 *
3948 * This is fine on non-MSI as well, as if we hit this path
3949 * we avoid exiting the interrupt handler only to generate
3950 * another one.
3951 *
3952 * Note that for MSI this could cause a stray interrupt report
3953 * if an interrupt landed in the time between writing IIR and
3954 * the posting read. This should be rare enough to never
3955 * trigger the 99% of 100,000 interrupts test for disabling
3956 * stray interrupts.
3957 */
Chris Wilson38bde182012-04-24 22:59:50 +01003958 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003959 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003960 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003961
Daniel Vetterd05c6172012-04-26 23:28:09 +02003962 i915_update_dri1_breadcrumb(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01003963
Chris Wilsona266c7d2012-04-24 22:59:44 +01003964 return ret;
3965}
3966
3967static void i915_irq_uninstall(struct drm_device * dev)
3968{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003969 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003970 int pipe;
3971
Chris Wilsona266c7d2012-04-24 22:59:44 +01003972 if (I915_HAS_HOTPLUG(dev)) {
3973 I915_WRITE(PORT_HOTPLUG_EN, 0);
3974 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3975 }
3976
Chris Wilson00d98eb2012-04-24 22:59:48 +01003977 I915_WRITE16(HWSTAM, 0xffff);
Damien Lespiau055e3932014-08-18 13:49:10 +01003978 for_each_pipe(dev_priv, pipe) {
Chris Wilson55b39752012-04-24 22:59:49 +01003979 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003980 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003981 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3982 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003983 I915_WRITE(IMR, 0xffffffff);
3984 I915_WRITE(IER, 0x0);
3985
Chris Wilsona266c7d2012-04-24 22:59:44 +01003986 I915_WRITE(IIR, I915_READ(IIR));
3987}
3988
3989static void i965_irq_preinstall(struct drm_device * dev)
3990{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003991 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003992 int pipe;
3993
Chris Wilsonadca4732012-05-11 18:01:31 +01003994 I915_WRITE(PORT_HOTPLUG_EN, 0);
3995 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003996
3997 I915_WRITE(HWSTAM, 0xeffe);
Damien Lespiau055e3932014-08-18 13:49:10 +01003998 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003999 I915_WRITE(PIPESTAT(pipe), 0);
4000 I915_WRITE(IMR, 0xffffffff);
4001 I915_WRITE(IER, 0x0);
4002 POSTING_READ(IER);
4003}
4004
4005static int i965_irq_postinstall(struct drm_device *dev)
4006{
Jani Nikula2d1013d2014-03-31 14:27:17 +03004007 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004008 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004009 u32 error_mask;
4010
Chris Wilsona266c7d2012-04-24 22:59:44 +01004011 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004012 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01004013 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004014 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4015 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4016 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4017 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4018 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4019
4020 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02004021 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4022 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01004023 enable_mask |= I915_USER_INTERRUPT;
4024
4025 if (IS_G4X(dev))
4026 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004027
Daniel Vetterb79480b2013-06-27 17:52:10 +02004028 /* Interrupt setup is already guaranteed to be single-threaded, this is
4029 * just to make the assert_spin_locked check happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02004030 spin_lock_irq(&dev_priv->irq_lock);
Imre Deak755e9012014-02-10 18:42:47 +02004031 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4032 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4033 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterd6207432014-09-15 14:55:27 +02004034 spin_unlock_irq(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004035
Chris Wilsona266c7d2012-04-24 22:59:44 +01004036 /*
4037 * Enable some error detection, note the instruction error mask
4038 * bit is reserved, so we leave it masked.
4039 */
4040 if (IS_G4X(dev)) {
4041 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4042 GM45_ERROR_MEM_PRIV |
4043 GM45_ERROR_CP_PRIV |
4044 I915_ERROR_MEMORY_REFRESH);
4045 } else {
4046 error_mask = ~(I915_ERROR_PAGE_TABLE |
4047 I915_ERROR_MEMORY_REFRESH);
4048 }
4049 I915_WRITE(EMR, error_mask);
4050
4051 I915_WRITE(IMR, dev_priv->irq_mask);
4052 I915_WRITE(IER, enable_mask);
4053 POSTING_READ(IER);
4054
Daniel Vetter20afbda2012-12-11 14:05:07 +01004055 I915_WRITE(PORT_HOTPLUG_EN, 0);
4056 POSTING_READ(PORT_HOTPLUG_EN);
4057
Jani Nikulaf49e38d2013-04-29 13:02:54 +03004058 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004059
4060 return 0;
4061}
4062
Egbert Eichbac56d52013-02-25 12:06:51 -05004063static void i915_hpd_irq_setup(struct drm_device *dev)
Daniel Vetter20afbda2012-12-11 14:05:07 +01004064{
Jani Nikula2d1013d2014-03-31 14:27:17 +03004065 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eichcd569ae2013-04-16 13:36:57 +02004066 struct intel_encoder *intel_encoder;
Daniel Vetter20afbda2012-12-11 14:05:07 +01004067 u32 hotplug_en;
4068
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02004069 assert_spin_locked(&dev_priv->irq_lock);
4070
Egbert Eichbac56d52013-02-25 12:06:51 -05004071 if (I915_HAS_HOTPLUG(dev)) {
4072 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4073 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4074 /* Note HDMI and DP share hotplug bits */
Egbert Eiche5868a32013-02-28 04:17:12 -05004075 /* enable bits are the same for all generations */
Damien Lespiaub2784e12014-08-05 11:29:37 +01004076 for_each_intel_encoder(dev, intel_encoder)
Egbert Eichcd569ae2013-04-16 13:36:57 +02004077 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4078 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
Egbert Eichbac56d52013-02-25 12:06:51 -05004079 /* Programming the CRT detection parameters tends
4080 to generate a spurious hotplug event about three
4081 seconds later. So just do it once.
4082 */
4083 if (IS_G4X(dev))
4084 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
Daniel Vetter85fc95b2013-03-27 15:47:11 +01004085 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
Egbert Eichbac56d52013-02-25 12:06:51 -05004086 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004087
Egbert Eichbac56d52013-02-25 12:06:51 -05004088 /* Ignore TV since it's buggy */
4089 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4090 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01004091}
4092
Daniel Vetterff1f5252012-10-02 15:10:55 +02004093static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004094{
Daniel Vetter45a83f82014-05-12 19:17:55 +02004095 struct drm_device *dev = arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03004096 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004097 u32 iir, new_iir;
4098 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01004099 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02004100 u32 flip_mask =
4101 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4102 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004103
Chris Wilsona266c7d2012-04-24 22:59:44 +01004104 iir = I915_READ(IIR);
4105
Chris Wilsona266c7d2012-04-24 22:59:44 +01004106 for (;;) {
Ville Syrjälä501e01d2014-01-17 11:35:15 +02004107 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson2c8ba292012-04-24 22:59:46 +01004108 bool blc_event = false;
4109
Chris Wilsona266c7d2012-04-24 22:59:44 +01004110 /* Can't rely on pipestat interrupt bit in iir as it might
4111 * have been cleared after the pipestat interrupt was received.
4112 * It doesn't set the bit in iir again, but it still produces
4113 * interrupts (for non-MSI).
4114 */
Daniel Vetter222c7f52014-09-15 14:55:28 +02004115 spin_lock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004116 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02004117 i915_handle_error(dev, false,
4118 "Command parser error, iir 0x%08x",
4119 iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004120
Damien Lespiau055e3932014-08-18 13:49:10 +01004121 for_each_pipe(dev_priv, pipe) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004122 int reg = PIPESTAT(pipe);
4123 pipe_stats[pipe] = I915_READ(reg);
4124
4125 /*
4126 * Clear the PIPE*STAT regs before the IIR
4127 */
4128 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004129 I915_WRITE(reg, pipe_stats[pipe]);
Ville Syrjälä501e01d2014-01-17 11:35:15 +02004130 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004131 }
4132 }
Daniel Vetter222c7f52014-09-15 14:55:28 +02004133 spin_unlock(&dev_priv->irq_lock);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004134
4135 if (!irq_received)
4136 break;
4137
4138 ret = IRQ_HANDLED;
4139
4140 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä16c6c562014-04-01 10:54:36 +03004141 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4142 i9xx_hpd_irq_handler(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004143
Ville Syrjälä21ad8332013-02-19 15:16:39 +02004144 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004145 new_iir = I915_READ(IIR); /* Flush posted writes */
4146
Chris Wilsona266c7d2012-04-24 22:59:44 +01004147 if (iir & I915_USER_INTERRUPT)
4148 notify_ring(dev, &dev_priv->ring[RCS]);
4149 if (iir & I915_BSD_USER_INTERRUPT)
4150 notify_ring(dev, &dev_priv->ring[VCS]);
4151
Damien Lespiau055e3932014-08-18 13:49:10 +01004152 for_each_pipe(dev_priv, pipe) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01004153 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02004154 i915_handle_vblank(dev, pipe, pipe, iir))
4155 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004156
4157 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4158 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02004159
4160 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02004161 i9xx_pipe_crc_irq_handler(dev, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01004162
Daniel Vetter1f7247c2014-09-30 10:56:48 +02004163 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4164 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02004165 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01004166
4167 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4168 intel_opregion_asle_intr(dev);
4169
Daniel Vetter515ac2b2012-12-01 13:53:44 +01004170 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4171 gmbus_irq_handler(dev);
4172
Chris Wilsona266c7d2012-04-24 22:59:44 +01004173 /* With MSI, interrupts are only generated when iir
4174 * transitions from zero to nonzero. If another bit got
4175 * set while we were handling the existing iir bits, then
4176 * we would never get another interrupt.
4177 *
4178 * This is fine on non-MSI as well, as if we hit this path
4179 * we avoid exiting the interrupt handler only to generate
4180 * another one.
4181 *
4182 * Note that for MSI this could cause a stray interrupt report
4183 * if an interrupt landed in the time between writing IIR and
4184 * the posting read. This should be rare enough to never
4185 * trigger the 99% of 100,000 interrupts test for disabling
4186 * stray interrupts.
4187 */
4188 iir = new_iir;
4189 }
4190
Daniel Vetterd05c6172012-04-26 23:28:09 +02004191 i915_update_dri1_breadcrumb(dev);
Chris Wilson2c8ba292012-04-24 22:59:46 +01004192
Chris Wilsona266c7d2012-04-24 22:59:44 +01004193 return ret;
4194}
4195
4196static void i965_irq_uninstall(struct drm_device * dev)
4197{
Jani Nikula2d1013d2014-03-31 14:27:17 +03004198 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004199 int pipe;
4200
4201 if (!dev_priv)
4202 return;
4203
Chris Wilsonadca4732012-05-11 18:01:31 +01004204 I915_WRITE(PORT_HOTPLUG_EN, 0);
4205 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01004206
4207 I915_WRITE(HWSTAM, 0xffffffff);
Damien Lespiau055e3932014-08-18 13:49:10 +01004208 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004209 I915_WRITE(PIPESTAT(pipe), 0);
4210 I915_WRITE(IMR, 0xffffffff);
4211 I915_WRITE(IER, 0x0);
4212
Damien Lespiau055e3932014-08-18 13:49:10 +01004213 for_each_pipe(dev_priv, pipe)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004214 I915_WRITE(PIPESTAT(pipe),
4215 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4216 I915_WRITE(IIR, I915_READ(IIR));
4217}
4218
Daniel Vetter4cb21832014-09-15 14:55:26 +02004219static void intel_hpd_irq_reenable_work(struct work_struct *work)
Egbert Eichac4c16c2013-04-16 13:36:58 +02004220{
Imre Deak63237512014-08-18 15:37:02 +03004221 struct drm_i915_private *dev_priv =
4222 container_of(work, typeof(*dev_priv),
4223 hotplug_reenable_work.work);
Egbert Eichac4c16c2013-04-16 13:36:58 +02004224 struct drm_device *dev = dev_priv->dev;
4225 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichac4c16c2013-04-16 13:36:58 +02004226 int i;
4227
Imre Deak63237512014-08-18 15:37:02 +03004228 intel_runtime_pm_get(dev_priv);
4229
Daniel Vetter4cb21832014-09-15 14:55:26 +02004230 spin_lock_irq(&dev_priv->irq_lock);
Egbert Eichac4c16c2013-04-16 13:36:58 +02004231 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4232 struct drm_connector *connector;
4233
4234 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4235 continue;
4236
4237 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4238
4239 list_for_each_entry(connector, &mode_config->connector_list, head) {
4240 struct intel_connector *intel_connector = to_intel_connector(connector);
4241
4242 if (intel_connector->encoder->hpd_pin == i) {
4243 if (connector->polled != intel_connector->polled)
4244 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004245 connector->name);
Egbert Eichac4c16c2013-04-16 13:36:58 +02004246 connector->polled = intel_connector->polled;
4247 if (!connector->polled)
4248 connector->polled = DRM_CONNECTOR_POLL_HPD;
4249 }
4250 }
4251 }
4252 if (dev_priv->display.hpd_irq_setup)
4253 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetter4cb21832014-09-15 14:55:26 +02004254 spin_unlock_irq(&dev_priv->irq_lock);
Imre Deak63237512014-08-18 15:37:02 +03004255
4256 intel_runtime_pm_put(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02004257}
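/*
 * Context for the work item above, inferred from the HPD_DISABLED check
 * rather than stated here: pins get parked in HPD_DISABLED elsewhere when a
 * hotplug interrupt storm is detected, and this delayed work later flips
 * them back to HPD_ENABLED, restores connector polling flags and reprograms
 * the hotplug hardware once the storm has passed.
 */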
4258
Daniel Vetterfca52a52014-09-30 10:56:45 +02004259/**
4260 * intel_irq_init - initializes irq support
4261 * @dev_priv: i915 device instance
4262 *
4263 * This function initializes all the irq support including work items, timers
4264 * and all the vtables. It does not set up the interrupt itself, though.
4265 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004266void intel_irq_init(struct drm_i915_private *dev_priv)
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004267{
Daniel Vetterb9632912014-09-30 10:56:44 +02004268 struct drm_device *dev = dev_priv->dev;
Chris Wilson8b2e3262012-04-24 22:59:41 +01004269
4270 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Dave Airlie13cf5502014-06-18 11:29:35 +10004271 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01004272 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02004273 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004274 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01004275
Deepak Sa6706b42014-03-15 20:23:22 +05304276 /* Let's track the enabled rps events */
Daniel Vetterb9632912014-09-30 10:56:44 +02004277 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
Ville Syrjälä6c65a582014-08-29 14:14:07 +03004278 /* WaGsvRC0ResidencyMethod:vlv */
Deepak S31685c22014-07-03 17:33:01 -04004279 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4280 else
4281 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
Deepak Sa6706b42014-03-15 20:23:22 +05304282
Daniel Vetter99584db2012-11-14 17:14:04 +01004283 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4284 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01004285 (unsigned long) dev);
Imre Deak63237512014-08-18 15:37:02 +03004286 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
Daniel Vetter4cb21832014-09-15 14:55:26 +02004287 intel_hpd_irq_reenable_work);
Daniel Vetter61bac782012-12-01 21:03:21 +01004288
Tomas Janousek97a19a22012-12-08 13:48:13 +01004289 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01004290
Daniel Vetterb9632912014-09-30 10:56:44 +02004291 if (IS_GEN2(dev_priv)) {
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +03004292 dev->max_vblank_count = 0;
4293 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
Daniel Vetterb9632912014-09-30 10:56:44 +02004294 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004295 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4296 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
Ville Syrjälä391f75e2013-09-25 19:55:26 +03004297 } else {
4298 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4299 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004300 }
4301
Ville Syrjälä21da2702014-08-06 14:49:55 +03004302 /*
4303 * Opt out of the vblank disable timer on everything except gen2.
4304 * Gen2 doesn't have a hardware frame counter and so depends on
4305 * vblank interrupts to produce sane vblank sequence numbers.
4306 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004307 if (!IS_GEN2(dev_priv))
Ville Syrjälä21da2702014-08-06 14:49:55 +03004308 dev->vblank_disable_immediate = true;
4309
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03004310 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
Keith Packardc3613de2011-08-12 17:05:54 -07004311 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03004312 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4313 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004314
Daniel Vetterb9632912014-09-30 10:56:44 +02004315 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä43f328d2014-04-09 20:40:52 +03004316 dev->driver->irq_handler = cherryview_irq_handler;
4317 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4318 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4319 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4320 dev->driver->enable_vblank = valleyview_enable_vblank;
4321 dev->driver->disable_vblank = valleyview_disable_vblank;
4322 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetterb9632912014-09-30 10:56:44 +02004323 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07004324 dev->driver->irq_handler = valleyview_irq_handler;
4325 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4326 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4327 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4328 dev->driver->enable_vblank = valleyview_enable_vblank;
4329 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05004330 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetterb9632912014-09-30 10:56:44 +02004331 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07004332 dev->driver->irq_handler = gen8_irq_handler;
Daniel Vetter723761b2014-05-22 17:56:34 +02004333 dev->driver->irq_preinstall = gen8_irq_reset;
Ben Widawskyabd58f02013-11-02 21:07:09 -07004334 dev->driver->irq_postinstall = gen8_irq_postinstall;
4335 dev->driver->irq_uninstall = gen8_irq_uninstall;
4336 dev->driver->enable_vblank = gen8_enable_vblank;
4337 dev->driver->disable_vblank = gen8_disable_vblank;
4338 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004339 } else if (HAS_PCH_SPLIT(dev)) {
4340 dev->driver->irq_handler = ironlake_irq_handler;
Daniel Vetter723761b2014-05-22 17:56:34 +02004341 dev->driver->irq_preinstall = ironlake_irq_reset;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004342 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4343 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4344 dev->driver->enable_vblank = ironlake_enable_vblank;
4345 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01004346 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004347 } else {
Daniel Vetterb9632912014-09-30 10:56:44 +02004348 if (INTEL_INFO(dev_priv)->gen == 2) {
Chris Wilsonc2798b12012-04-22 21:13:57 +01004349 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4350 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4351 dev->driver->irq_handler = i8xx_irq_handler;
4352 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Daniel Vetterb9632912014-09-30 10:56:44 +02004353 } else if (INTEL_INFO(dev_priv)->gen == 3) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004354 dev->driver->irq_preinstall = i915_irq_preinstall;
4355 dev->driver->irq_postinstall = i915_irq_postinstall;
4356 dev->driver->irq_uninstall = i915_irq_uninstall;
4357 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01004358 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004359 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004360 dev->driver->irq_preinstall = i965_irq_preinstall;
4361 dev->driver->irq_postinstall = i965_irq_postinstall;
4362 dev->driver->irq_uninstall = i965_irq_uninstall;
4363 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05004364 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004365 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004366 dev->driver->enable_vblank = i915_enable_vblank;
4367 dev->driver->disable_vblank = i915_disable_vblank;
4368 }
4369}
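/*
 * intel_irq_init() above is pure dispatch: each platform family is given its
 * own preinstall/postinstall/handler/uninstall entries plus matching vblank
 * and hpd_irq_setup hooks, so callers elsewhere in the driver can go through
 * the drm_driver vtable without branching on the hardware generation.
 */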
Daniel Vetter20afbda2012-12-11 14:05:07 +01004370
Daniel Vetterfca52a52014-09-30 10:56:45 +02004371/**
4372 * intel_hpd_init - initializes and enables hpd support
4373 * @dev_priv: i915 device instance
4374 *
4375 * This function enables the hotplug support. It requires that interrupts have
4376 * already been enabled with intel_irq_install(). From this point on hotplug and
4377 * poll requests can run concurrently with other code, so locking rules must be
4378 * obeyed.
4379 *
4380 * This is a separate step from interrupt enabling to simplify the locking rules
4381 * in the driver load and resume code.
4382 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004383void intel_hpd_init(struct drm_i915_private *dev_priv)
Daniel Vetter20afbda2012-12-11 14:05:07 +01004384{
Daniel Vetterb9632912014-09-30 10:56:44 +02004385 struct drm_device *dev = dev_priv->dev;
Egbert Eich821450c2013-04-16 13:36:55 +02004386 struct drm_mode_config *mode_config = &dev->mode_config;
4387 struct drm_connector *connector;
4388 int i;
Daniel Vetter20afbda2012-12-11 14:05:07 +01004389
Egbert Eich821450c2013-04-16 13:36:55 +02004390 for (i = 1; i < HPD_NUM_PINS; i++) {
4391 dev_priv->hpd_stats[i].hpd_cnt = 0;
4392 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4393 }
4394 list_for_each_entry(connector, &mode_config->connector_list, head) {
4395 struct intel_connector *intel_connector = to_intel_connector(connector);
4396 connector->polled = intel_connector->polled;
Dave Airlie0e32b392014-05-02 14:02:48 +10004397 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4398 connector->polled = DRM_CONNECTOR_POLL_HPD;
4399 if (intel_connector->mst_port)
Egbert Eich821450c2013-04-16 13:36:55 +02004400 connector->polled = DRM_CONNECTOR_POLL_HPD;
4401 }
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02004402
4403 /* Interrupt setup is already guaranteed to be single-threaded, this is
4404 * just to make the assert_spin_locked checks happy. */
Daniel Vetterd6207432014-09-15 14:55:27 +02004405 spin_lock_irq(&dev_priv->irq_lock);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004406 if (dev_priv->display.hpd_irq_setup)
4407 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterd6207432014-09-15 14:55:27 +02004408 spin_unlock_irq(&dev_priv->irq_lock);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004409}
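/*
 * Taken together, the kerneldoc in this file implies a bring-up order along
 * these lines (a sketch of a hypothetical caller, not copied from the actual
 * load or resume code):
 *
 *	intel_irq_init(dev_priv);		- vtables, workers, timers
 *	ret = intel_irq_install(dev_priv);	- hardware interrupt live
 *	intel_hpd_init(dev_priv);		- hotplug enabled last
 */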
Paulo Zanonic67a4702013-08-19 13:18:09 -03004410
Daniel Vetterfca52a52014-09-30 10:56:45 +02004411/**
4412 * intel_irq_install - enables the hardware interrupt
4413 * @dev_priv: i915 device instance
4414 *
4415 * This function enables the hardware interrupt handling, but leaves the hotplug
4416 * handling still disabled. It is called after intel_irq_init().
4417 *
4418 * In the driver load and resume code we need working interrupts in a few places
4419 * but don't want to deal with the hassle of concurrent probe and hotplug
4420 * workers. Hence the split into this two-stage approach.
4421 */
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004422int intel_irq_install(struct drm_i915_private *dev_priv)
4423{
4424 /*
4425 * We enable some interrupt sources in our postinstall hooks, so mark
4426 * interrupts as enabled _before_ actually enabling them to avoid
4427 * special cases in our ordering checks.
4428 */
4429 dev_priv->pm.irqs_enabled = true;
4430
4431 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4432}
4433
Daniel Vetterfca52a52014-09-30 10:56:45 +02004434/**
4435 * intel_irq_uninstall - finilizes all irq handling
4436 * @dev_priv: i915 device instance
4437 *
4438 * This stops interrupt and hotplug handling and unregisters and frees all
4439 * resources acquired in the init functions.
4440 */
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004441void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4442{
4443 drm_irq_uninstall(dev_priv->dev);
4444 intel_hpd_cancel_work(dev_priv);
4445 dev_priv->pm.irqs_enabled = false;
4446}
4447
Daniel Vetterfca52a52014-09-30 10:56:45 +02004448/**
4449 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4450 * @dev_priv: i915 device instance
4451 *
4452 * This function is used to disable interrupts at runtime, both in the runtime
4453 * pm and the system suspend/resume code.
4454 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004455void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004456{
Daniel Vetterb9632912014-09-30 10:56:44 +02004457 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004458 dev_priv->pm.irqs_enabled = false;
Paulo Zanonic67a4702013-08-19 13:18:09 -03004459}
4460
Daniel Vetterfca52a52014-09-30 10:56:45 +02004461/**
4462 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4463 * @dev_priv: i915 device instance
4464 *
4465 * This function is used to enable interrupts at runtime, both in the runtime
4466 * pm and the system suspend/resume code.
4467 */
Daniel Vetterb9632912014-09-30 10:56:44 +02004468void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004469{
Daniel Vetter2aeb7d32014-09-30 10:56:43 +02004470 dev_priv->pm.irqs_enabled = true;
Daniel Vetterb9632912014-09-30 10:56:44 +02004471 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4472 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
Paulo Zanonic67a4702013-08-19 13:18:09 -03004473}
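/*
 * These two helpers are meant to be used as a pair by the runtime-pm and
 * system suspend/resume paths: the disable side tears everything down through
 * the driver's irq_uninstall hook, and the enable side rebuilds the state by
 * replaying the preinstall and postinstall hooks, which effectively relies on
 * every postinstall in this file being safe to run more than once.
 */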