/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)

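/*
 * Illustrative usage (a sketch, not one of the call sites in this excerpt):
 * an irq preinstall/uninstall path would do e.g. GEN5_IRQ_RESET(DE) to mask,
 * disable and double-clear DEIMR/DEIER/DEIIR, and the matching postinstall
 * path would do GEN5_IRQ_INIT(DE, de_imr_val, de_ier_val), which first checks
 * via GEN5_ASSERT_IIR_IS_ZERO() that no stale IIR bits survived the reset.
 */
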
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
	POSTING_READ(reg);
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
333
Daniel Vetterfee884e2013-07-04 23:35:21 +0200334/**
335 * ibx_display_interrupt_update - update SDEIMR
336 * @dev_priv: driver private
337 * @interrupt_mask: mask of interrupt bits to update
338 * @enabled_irq_mask: mask of interrupt bits to enable
339 */
340static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
341 uint32_t interrupt_mask,
342 uint32_t enabled_irq_mask)
343{
344 uint32_t sdeimr = I915_READ(SDEIMR);
345 sdeimr &= ~interrupt_mask;
346 sdeimr |= (~enabled_irq_mask & interrupt_mask);
347
348 assert_spin_locked(&dev_priv->irq_lock);
349
Paulo Zanoni730488b2014-03-07 20:12:32 -0300350 if (WARN_ON(dev_priv->pm.irqs_disabled))
Paulo Zanonic67a4702013-08-19 13:18:09 -0300351 return;
Paulo Zanonic67a4702013-08-19 13:18:09 -0300352
Daniel Vetterfee884e2013-07-04 23:35:21 +0200353 I915_WRITE(SDEIMR, sdeimr);
354 POSTING_READ(SDEIMR);
355}
356#define ibx_enable_display_interrupt(dev_priv, bits) \
357 ibx_display_interrupt_update((dev_priv), (bits), (bits))
358#define ibx_disable_display_interrupt(dev_priv, bits) \
359 ibx_display_interrupt_update((dev_priv), (bits), 0)
360
Daniel Vetterde280752013-07-04 23:35:24 +0200361static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
362 enum transcoder pch_transcoder,
Paulo Zanoni86642812013-04-12 17:57:57 -0300363 bool enable)
364{
Paulo Zanoni86642812013-04-12 17:57:57 -0300365 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterde280752013-07-04 23:35:24 +0200366 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
367 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
Paulo Zanoni86642812013-04-12 17:57:57 -0300368
369 if (enable)
Daniel Vetterfee884e2013-07-04 23:35:21 +0200370 ibx_enable_display_interrupt(dev_priv, bit);
Paulo Zanoni86642812013-04-12 17:57:57 -0300371 else
Daniel Vetterfee884e2013-07-04 23:35:21 +0200372 ibx_disable_display_interrupt(dev_priv, bit);
Paulo Zanoni86642812013-04-12 17:57:57 -0300373}
374
375static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
376 enum transcoder pch_transcoder,
377 bool enable)
378{
379 struct drm_i915_private *dev_priv = dev->dev_private;
380
381 if (enable) {
Daniel Vetter1dd246f2013-07-10 08:30:23 +0200382 I915_WRITE(SERR_INT,
383 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
384
Paulo Zanoni86642812013-04-12 17:57:57 -0300385 if (!cpt_can_enable_serr_int(dev))
386 return;
387
Daniel Vetterfee884e2013-07-04 23:35:21 +0200388 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
Paulo Zanoni86642812013-04-12 17:57:57 -0300389 } else {
Daniel Vetter1dd246f2013-07-10 08:30:23 +0200390 uint32_t tmp = I915_READ(SERR_INT);
391 bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
392
393 /* Change the state _after_ we've read out the current one. */
Daniel Vetterfee884e2013-07-04 23:35:21 +0200394 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
Daniel Vetter1dd246f2013-07-10 08:30:23 +0200395
396 if (!was_enabled &&
397 (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
398 DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
399 transcoder_name(pch_transcoder));
400 }
Paulo Zanoni86642812013-04-12 17:57:57 -0300401 }
Paulo Zanoni86642812013-04-12 17:57:57 -0300402}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	assert_spin_locked(&dev_priv->irq_lock);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
		i9xx_clear_fifo_underrun(dev, pipe);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	return ret;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
461
Imre Deak91d181d2014-02-10 18:42:49 +0200462static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
463 enum pipe pipe)
464{
465 struct drm_i915_private *dev_priv = dev->dev_private;
466 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
467 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
468
469 return !intel_crtc->cpu_fifo_underrun_disabled;
470}
471
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet, on pipe B the
	 * same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
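
/*
 * Background note on the mapping above: in PIPESTAT the status bits sit in
 * the low 16 bits and the matching enable bits in the high 16 bits, which is
 * why the default derivation is simply status_mask << 16; the VLV sprite
 * flip-done and FIFO underrun bits are the exceptions patched up afterwards.
 */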

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
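
/*
 * Worked example for the cooked-up counter above (hypothetical numbers): with
 * htotal = 2200 and crtc_vblank_start = 1080, vbl_start is 1080 * 2200
 * pixels. If the hardware frame counter reads N but the pixel counter is
 * already past vbl_start, N + 1 is returned, so the reported value behaves as
 * if the counter incremented at the start of vblank rather than at the start
 * of active.
 */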

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		/*
		 * Scanline counter increments at leading edge of hsync, and
		 * it starts counting from vtotal-1 on the first active line.
		 * That means the scanline counter value is always one less
		 * than what we would expect. Ie. just after start of vblank,
		 * which also occurs at start of hsync (on the last active line),
		 * the scanline counter will read vblank_start-1.
		 */
		position = (position + 1) % vtotal;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
878
Jesse Barnesf71d4af2011-06-28 13:00:41 -0700879static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100880 int *max_error,
881 struct timeval *vblank_time,
882 unsigned flags)
883{
Chris Wilson4041b852011-01-22 10:07:56 +0000884 struct drm_crtc *crtc;
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100885
Ben Widawsky7eb552a2013-03-13 14:05:41 -0700886 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
Chris Wilson4041b852011-01-22 10:07:56 +0000887 DRM_ERROR("Invalid crtc %d\n", pipe);
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100888 return -EINVAL;
889 }
890
891 /* Get drm_crtc to timestamp: */
Chris Wilson4041b852011-01-22 10:07:56 +0000892 crtc = intel_get_crtc_for_pipe(dev, pipe);
893 if (crtc == NULL) {
894 DRM_ERROR("Invalid crtc %d\n", pipe);
895 return -EINVAL;
896 }
897
898 if (!crtc->enabled) {
899 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
900 return -EBUSY;
901 }
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100902
903 /* Helper routine in DRM core does all the work: */
Chris Wilson4041b852011-01-22 10:07:56 +0000904 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
905 vblank_time, flags,
Ville Syrjälä7da903e2013-10-26 17:57:31 +0300906 crtc,
907 &to_intel_crtc(crtc)->config.adjusted_mode);
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100908}
909
Jani Nikula67c347f2013-09-17 14:26:34 +0300910static bool intel_hpd_irq_event(struct drm_device *dev,
911 struct drm_connector *connector)
Egbert Eich321a1b32013-04-11 16:00:26 +0200912{
913 enum drm_connector_status old_status;
914
915 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
916 old_status = connector->status;
917
918 connector->status = connector->funcs->detect(connector, false);
Jani Nikula67c347f2013-09-17 14:26:34 +0300919 if (old_status == connector->status)
920 return false;
921
922 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
Egbert Eich321a1b32013-04-11 16:00:26 +0200923 connector->base.id,
924 drm_get_connector_name(connector),
Jani Nikula67c347f2013-09-17 14:26:34 +0300925 drm_get_connector_status_name(old_status),
926 drm_get_connector_status_name(connector->status));
927
928 return true;
Egbert Eich321a1b32013-04-11 16:00:26 +0200929}
930
Jesse Barnes5ca58282009-03-31 14:11:15 -0700931/*
932 * Handle hotplug events outside the interrupt handler proper.
933 */
Egbert Eichac4c16c2013-04-16 13:36:58 +0200934#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
935
Jesse Barnes5ca58282009-03-31 14:11:15 -0700936static void i915_hotplug_work_func(struct work_struct *work)
937{
Jani Nikula2d1013d2014-03-31 14:27:17 +0300938 struct drm_i915_private *dev_priv =
939 container_of(work, struct drm_i915_private, hotplug_work);
Jesse Barnes5ca58282009-03-31 14:11:15 -0700940 struct drm_device *dev = dev_priv->dev;
Keith Packardc31c4ba2009-05-06 11:48:58 -0700941 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichcd569ae2013-04-16 13:36:57 +0200942 struct intel_connector *intel_connector;
943 struct intel_encoder *intel_encoder;
944 struct drm_connector *connector;
945 unsigned long irqflags;
946 bool hpd_disabled = false;
Egbert Eich321a1b32013-04-11 16:00:26 +0200947 bool changed = false;
Egbert Eich142e2392013-04-11 15:57:57 +0200948 u32 hpd_event_bits;
Jesse Barnes5ca58282009-03-31 14:11:15 -0700949
Daniel Vetter52d7ece2012-12-01 21:03:22 +0100950 /* HPD irq before everything is fully set up. */
951 if (!dev_priv->enable_hotplug_processing)
952 return;
953
Keith Packarda65e34c2011-07-25 10:04:56 -0700954 mutex_lock(&mode_config->mutex);
Jesse Barnese67189ab2011-02-11 14:44:51 -0800955 DRM_DEBUG_KMS("running encoder hotplug functions\n");
956
Egbert Eichcd569ae2013-04-16 13:36:57 +0200957 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Egbert Eich142e2392013-04-11 15:57:57 +0200958
959 hpd_event_bits = dev_priv->hpd_event_bits;
960 dev_priv->hpd_event_bits = 0;
Egbert Eichcd569ae2013-04-16 13:36:57 +0200961 list_for_each_entry(connector, &mode_config->connector_list, head) {
962 intel_connector = to_intel_connector(connector);
963 intel_encoder = intel_connector->encoder;
964 if (intel_encoder->hpd_pin > HPD_NONE &&
965 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
966 connector->polled == DRM_CONNECTOR_POLL_HPD) {
967 DRM_INFO("HPD interrupt storm detected on connector %s: "
968 "switching from hotplug detection to polling\n",
969 drm_get_connector_name(connector));
970 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
971 connector->polled = DRM_CONNECTOR_POLL_CONNECT
972 | DRM_CONNECTOR_POLL_DISCONNECT;
973 hpd_disabled = true;
974 }
Egbert Eich142e2392013-04-11 15:57:57 +0200975 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
976 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
977 drm_get_connector_name(connector), intel_encoder->hpd_pin);
978 }
Egbert Eichcd569ae2013-04-16 13:36:57 +0200979 }
980 /* if there were no outputs to poll, poll was disabled,
981 * therefore make sure it's enabled when disabling HPD on
982 * some connectors */
Egbert Eichac4c16c2013-04-16 13:36:58 +0200983 if (hpd_disabled) {
Egbert Eichcd569ae2013-04-16 13:36:57 +0200984 drm_kms_helper_poll_enable(dev);
Egbert Eichac4c16c2013-04-16 13:36:58 +0200985 mod_timer(&dev_priv->hotplug_reenable_timer,
986 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
987 }
Egbert Eichcd569ae2013-04-16 13:36:57 +0200988
989 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
990
Egbert Eich321a1b32013-04-11 16:00:26 +0200991 list_for_each_entry(connector, &mode_config->connector_list, head) {
992 intel_connector = to_intel_connector(connector);
993 intel_encoder = intel_connector->encoder;
994 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
995 if (intel_encoder->hot_plug)
996 intel_encoder->hot_plug(intel_encoder);
997 if (intel_hpd_irq_event(dev, connector))
998 changed = true;
999 }
1000 }
Keith Packard40ee3382011-07-28 15:31:19 -07001001 mutex_unlock(&mode_config->mutex);
1002
Egbert Eich321a1b32013-04-11 16:00:26 +02001003 if (changed)
1004 drm_kms_helper_hotplug_event(dev);
Jesse Barnes5ca58282009-03-31 14:11:15 -07001005}
1006
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02001007static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
1008{
1009 del_timer_sync(&dev_priv->hotplug_reenable_timer);
1010}
1011
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001012static void ironlake_rps_change_irq_handler(struct drm_device *dev)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001013{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001014 struct drm_i915_private *dev_priv = dev->dev_private;
Matthew Garrettb5b72e82010-02-02 18:30:47 +00001015 u32 busy_up, busy_down, max_avg, min_avg;
Daniel Vetter92703882012-08-09 16:46:01 +02001016 u8 new_delay;
Daniel Vetter92703882012-08-09 16:46:01 +02001017
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001018 spin_lock(&mchdev_lock);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001019
Daniel Vetter73edd18f2012-08-08 23:35:37 +02001020 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1021
Daniel Vetter20e4d402012-08-08 23:35:39 +02001022 new_delay = dev_priv->ips.cur_delay;
Daniel Vetter92703882012-08-09 16:46:01 +02001023
Jesse Barnes7648fa92010-05-20 14:28:11 -07001024 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
Matthew Garrettb5b72e82010-02-02 18:30:47 +00001025 busy_up = I915_READ(RCPREVBSYTUPAVG);
1026 busy_down = I915_READ(RCPREVBSYTDNAVG);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001027 max_avg = I915_READ(RCBMAXAVG);
1028 min_avg = I915_READ(RCBMINAVG);
1029
1030 /* Handle RCS change request from hw */
Matthew Garrettb5b72e82010-02-02 18:30:47 +00001031 if (busy_up > max_avg) {
Daniel Vetter20e4d402012-08-08 23:35:39 +02001032 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1033 new_delay = dev_priv->ips.cur_delay - 1;
1034 if (new_delay < dev_priv->ips.max_delay)
1035 new_delay = dev_priv->ips.max_delay;
Matthew Garrettb5b72e82010-02-02 18:30:47 +00001036 } else if (busy_down < min_avg) {
Daniel Vetter20e4d402012-08-08 23:35:39 +02001037 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1038 new_delay = dev_priv->ips.cur_delay + 1;
1039 if (new_delay > dev_priv->ips.min_delay)
1040 new_delay = dev_priv->ips.min_delay;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001041 }
1042
Jesse Barnes7648fa92010-05-20 14:28:11 -07001043 if (ironlake_set_drps(dev, new_delay))
Daniel Vetter20e4d402012-08-08 23:35:39 +02001044 dev_priv->ips.cur_delay = new_delay;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001045
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001046 spin_unlock(&mchdev_lock);
Daniel Vetter92703882012-08-09 16:46:01 +02001047
Jesse Barnesf97108d2010-01-29 11:27:07 -08001048 return;
1049}
1050
Chris Wilson549f7362010-10-19 11:19:32 +01001051static void notify_ring(struct drm_device *dev,
1052 struct intel_ring_buffer *ring)
1053{
Chris Wilson475553d2011-01-20 09:52:56 +00001054 if (ring->obj == NULL)
1055 return;
1056
Chris Wilson814e9b52013-09-23 17:33:19 -03001057 trace_i915_gem_request_complete(ring);
Chris Wilson9862e602011-01-04 22:22:17 +00001058
Chris Wilson549f7362010-10-19 11:19:32 +01001059 wake_up_all(&ring->irq_queue);
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03001060 i915_queue_hangcheck(dev);
Chris Wilson549f7362010-10-19 11:19:32 +01001061}
1062
Ben Widawsky4912d042011-04-25 11:25:20 -07001063static void gen6_pm_rps_work(struct work_struct *work)
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001064{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001065 struct drm_i915_private *dev_priv =
1066 container_of(work, struct drm_i915_private, rps.work);
Paulo Zanoniedbfdb42013-08-06 18:57:13 -03001067 u32 pm_iir;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001068 int new_delay, adj;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001069
Daniel Vetter59cdb632013-07-04 23:35:28 +02001070 spin_lock_irq(&dev_priv->irq_lock);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001071 pm_iir = dev_priv->rps.pm_iir;
1072 dev_priv->rps.pm_iir = 0;
Ben Widawsky48484052013-05-28 19:22:27 -07001073 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
Deepak Sa6706b42014-03-15 20:23:22 +05301074 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
Daniel Vetter59cdb632013-07-04 23:35:28 +02001075 spin_unlock_irq(&dev_priv->irq_lock);
Ben Widawsky4912d042011-04-25 11:25:20 -07001076
Paulo Zanoni60611c12013-08-15 11:50:01 -03001077 /* Make sure we didn't queue anything we're not going to process. */
Deepak Sa6706b42014-03-15 20:23:22 +05301078 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
Paulo Zanoni60611c12013-08-15 11:50:01 -03001079
Deepak Sa6706b42014-03-15 20:23:22 +05301080 if ((pm_iir & dev_priv->pm_rps_events) == 0)
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001081 return;
1082
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001083 mutex_lock(&dev_priv->rps.hw_lock);
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01001084
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001085 adj = dev_priv->rps.last_adj;
Ville Syrjälä74250342013-06-25 21:38:11 +03001086 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001087 if (adj > 0)
1088 adj *= 2;
1089 else
1090 adj = 1;
Ben Widawskyb39fb292014-03-19 18:31:11 -07001091 new_delay = dev_priv->rps.cur_freq + adj;
Ville Syrjälä74250342013-06-25 21:38:11 +03001092
1093 /*
1094 * For better performance, jump directly
1095 * to RPe if we're below it.
1096 */
Ben Widawskyb39fb292014-03-19 18:31:11 -07001097 if (new_delay < dev_priv->rps.efficient_freq)
1098 new_delay = dev_priv->rps.efficient_freq;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001099 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
Ben Widawskyb39fb292014-03-19 18:31:11 -07001100 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1101 new_delay = dev_priv->rps.efficient_freq;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001102 else
Ben Widawskyb39fb292014-03-19 18:31:11 -07001103 new_delay = dev_priv->rps.min_freq_softlimit;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001104 adj = 0;
1105 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1106 if (adj < 0)
1107 adj *= 2;
1108 else
1109 adj = -1;
Ben Widawskyb39fb292014-03-19 18:31:11 -07001110 new_delay = dev_priv->rps.cur_freq + adj;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001111 } else { /* unknown event */
Ben Widawskyb39fb292014-03-19 18:31:11 -07001112 new_delay = dev_priv->rps.cur_freq;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001113 }
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001114
Ben Widawsky79249632012-09-07 19:43:42 -07001115 /* sysfs frequency interfaces may have snuck in while servicing the
1116 * interrupt
1117 */
Ville Syrjälä1272e7b2013-11-07 19:57:49 +02001118 new_delay = clamp_t(int, new_delay,
Ben Widawskyb39fb292014-03-19 18:31:11 -07001119 dev_priv->rps.min_freq_softlimit,
1120 dev_priv->rps.max_freq_softlimit);
Deepak S27544362014-01-27 21:35:05 +05301121
Ben Widawskyb39fb292014-03-19 18:31:11 -07001122 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001123
1124 if (IS_VALLEYVIEW(dev_priv->dev))
1125 valleyview_set_rps(dev_priv->dev, new_delay);
1126 else
1127 gen6_set_rps(dev_priv->dev, new_delay);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001128
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001129 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001130}
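
/*
 * Adjustment behaviour illustrated (derived from the code above): repeated
 * up-threshold interrupts grow the step as +1, +2, +4, ... frequency units,
 * a down-timeout snaps back to the efficient frequency (or the soft minimum),
 * and repeated down-threshold interrupts grow the step as -1, -2, -4, ...,
 * with the final request always clamped to the sysfs soft limits.
 */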
1131
Ben Widawskye3689192012-05-25 16:56:22 -07001132
1133/**
1134 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1135 * occurred.
1136 * @work: workqueue struct
1137 *
1138 * Doesn't actually do anything except notify userspace. As a consequence of
1139 * this event, userspace should try to remap the bad rows since statistically
1140 * it is likely the same row is more likely to go bad again.
1141 */
1142static void ivybridge_parity_work(struct work_struct *work)
1143{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001144 struct drm_i915_private *dev_priv =
1145 container_of(work, struct drm_i915_private, l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001146 u32 error_status, row, bank, subbank;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001147 char *parity_event[6];
Ben Widawskye3689192012-05-25 16:56:22 -07001148 uint32_t misccpctl;
1149 unsigned long flags;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001150 uint8_t slice = 0;
Ben Widawskye3689192012-05-25 16:56:22 -07001151
1152 /* We must turn off DOP level clock gating to access the L3 registers.
1153 * In order to prevent a get/put style interface, acquire struct mutex
1154 * any time we access those registers.
1155 */
1156 mutex_lock(&dev_priv->dev->struct_mutex);
1157
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001158 /* If we've screwed up tracking, just let the interrupt fire again */
1159 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1160 goto out;
1161
Ben Widawskye3689192012-05-25 16:56:22 -07001162 misccpctl = I915_READ(GEN7_MISCCPCTL);
1163 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1164 POSTING_READ(GEN7_MISCCPCTL);
1165
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001166 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1167 u32 reg;
Ben Widawskye3689192012-05-25 16:56:22 -07001168
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001169 slice--;
1170 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1171 break;
1172
1173 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1174
1175 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1176
1177 error_status = I915_READ(reg);
1178 row = GEN7_PARITY_ERROR_ROW(error_status);
1179 bank = GEN7_PARITY_ERROR_BANK(error_status);
1180 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1181
1182 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1183 POSTING_READ(reg);
1184
1185 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1186 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1187 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1188 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1189 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1190 parity_event[5] = NULL;
1191
Dave Airlie5bdebb12013-10-11 14:07:25 +10001192 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001193 KOBJ_CHANGE, parity_event);
1194
1195 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1196 slice, row, bank, subbank);
1197
1198 kfree(parity_event[4]);
1199 kfree(parity_event[3]);
1200 kfree(parity_event[2]);
1201 kfree(parity_event[1]);
1202 }
Ben Widawskye3689192012-05-25 16:56:22 -07001203
1204 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1205
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001206out:
1207 WARN_ON(dev_priv->l3_parity.which_slice);
Ben Widawskye3689192012-05-25 16:56:22 -07001208 spin_lock_irqsave(&dev_priv->irq_lock, flags);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001209 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
Ben Widawskye3689192012-05-25 16:56:22 -07001210 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1211
1212 mutex_unlock(&dev_priv->dev->struct_mutex);
Ben Widawskye3689192012-05-25 16:56:22 -07001213}
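/*
 * Illustrative sketch, not part of the driver: one plausible way for a
 * userspace daemon to consume the parity uevent emitted above. Kobject
 * uevents are broadcast as NUL-separated "KEY=value" strings on the
 * NETLINK_KOBJECT_UEVENT multicast group. The key checked below matches
 * the I915_L3_PARITY_UEVENT "=1" string built in ivybridge_parity_work();
 * treat the exact spelling as an assumption that may vary by kernel.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int l3_parity_listen(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,			/* kernel uevent broadcast group */
	};
	char buf[4096];
	ssize_t len;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
		ssize_t i = 0;

		buf[len] = '\0';
		while (i < len) {	/* walk the NUL-separated env strings */
			if (!strcmp(&buf[i], "L3_PARITY_ERROR=1"))
				printf("L3 parity error: remap the reported row\n");
			i += strlen(&buf[i]) + 1;
		}
	}
	return 0;
}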
1214
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001215static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
Ben Widawskye3689192012-05-25 16:56:22 -07001216{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001217 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskye3689192012-05-25 16:56:22 -07001218
Ben Widawsky040d2ba2013-09-19 11:01:40 -07001219 if (!HAS_L3_DPF(dev))
Ben Widawskye3689192012-05-25 16:56:22 -07001220 return;
1221
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001222 spin_lock(&dev_priv->irq_lock);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001223 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001224 spin_unlock(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001225
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001226 iir &= GT_PARITY_ERROR(dev);
1227 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1228 dev_priv->l3_parity.which_slice |= 1 << 1;
1229
1230 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1231 dev_priv->l3_parity.which_slice |= 1 << 0;
1232
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001233 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001234}
1235
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001236static void ilk_gt_irq_handler(struct drm_device *dev,
1237 struct drm_i915_private *dev_priv,
1238 u32 gt_iir)
1239{
1240 if (gt_iir &
1241 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1242 notify_ring(dev, &dev_priv->ring[RCS]);
1243 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1244 notify_ring(dev, &dev_priv->ring[VCS]);
1245}
1246
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001247static void snb_gt_irq_handler(struct drm_device *dev,
1248 struct drm_i915_private *dev_priv,
1249 u32 gt_iir)
1250{
1251
Ben Widawskycc609d52013-05-28 19:22:29 -07001252 if (gt_iir &
1253 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001254 notify_ring(dev, &dev_priv->ring[RCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001255 if (gt_iir & GT_BSD_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001256 notify_ring(dev, &dev_priv->ring[VCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001257 if (gt_iir & GT_BLT_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001258 notify_ring(dev, &dev_priv->ring[BCS]);
1259
Ben Widawskycc609d52013-05-28 19:22:29 -07001260 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1261 GT_BSD_CS_ERROR_INTERRUPT |
1262 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
Mika Kuoppala58174462014-02-25 17:11:26 +02001263 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1264 gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001265 }
Ben Widawskye3689192012-05-25 16:56:22 -07001266
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001267 if (gt_iir & GT_PARITY_ERROR(dev))
1268 ivybridge_parity_error_irq_handler(dev, gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001269}
1270
Ben Widawskyabd58f02013-11-02 21:07:09 -07001271static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1272 struct drm_i915_private *dev_priv,
1273 u32 master_ctl)
1274{
1275 u32 rcs, bcs, vcs;
1276 uint32_t tmp = 0;
1277 irqreturn_t ret = IRQ_NONE;
1278
1279 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1280 tmp = I915_READ(GEN8_GT_IIR(0));
1281 if (tmp) {
1282 ret = IRQ_HANDLED;
1283 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1284 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1285 if (rcs & GT_RENDER_USER_INTERRUPT)
1286 notify_ring(dev, &dev_priv->ring[RCS]);
1287 if (bcs & GT_RENDER_USER_INTERRUPT)
1288 notify_ring(dev, &dev_priv->ring[BCS]);
1289 I915_WRITE(GEN8_GT_IIR(0), tmp);
1290 } else
1291 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1292 }
1293
Zhao Yakui85f9b5f2014-04-17 10:37:38 +08001294 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07001295 tmp = I915_READ(GEN8_GT_IIR(1));
1296 if (tmp) {
1297 ret = IRQ_HANDLED;
1298 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1299 if (vcs & GT_RENDER_USER_INTERRUPT)
1300 notify_ring(dev, &dev_priv->ring[VCS]);
Zhao Yakui85f9b5f2014-04-17 10:37:38 +08001301 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1302 if (vcs & GT_RENDER_USER_INTERRUPT)
1303 notify_ring(dev, &dev_priv->ring[VCS2]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001304 I915_WRITE(GEN8_GT_IIR(1), tmp);
1305 } else
1306 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1307 }
1308
1309 if (master_ctl & GEN8_GT_VECS_IRQ) {
1310 tmp = I915_READ(GEN8_GT_IIR(3));
1311 if (tmp) {
1312 ret = IRQ_HANDLED;
1313 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1314 if (vcs & GT_RENDER_USER_INTERRUPT)
1315 notify_ring(dev, &dev_priv->ring[VECS]);
1316 I915_WRITE(GEN8_GT_IIR(3), tmp);
1317 } else
1318 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1319 }
1320
1321 return ret;
1322}
1323
Egbert Eichb543fb02013-04-16 13:36:54 +02001324#define HPD_STORM_DETECT_PERIOD 1000
1325#define HPD_STORM_THRESHOLD 5
1326
Daniel Vetter10a504d2013-06-27 17:52:12 +02001327static inline void intel_hpd_irq_handler(struct drm_device *dev,
Daniel Vetter22062db2013-06-27 17:52:11 +02001328 u32 hotplug_trigger,
1329 const u32 *hpd)
Egbert Eichb543fb02013-04-16 13:36:54 +02001330{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001331 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eichb543fb02013-04-16 13:36:54 +02001332 int i;
Daniel Vetter10a504d2013-06-27 17:52:12 +02001333 bool storm_detected = false;
Egbert Eichb543fb02013-04-16 13:36:54 +02001334
Daniel Vetter91d131d2013-06-27 17:52:14 +02001335 if (!hotplug_trigger)
1336 return;
1337
Imre Deakcc9bd492014-01-16 19:56:54 +02001338 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1339 hotplug_trigger);
1340
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001341 spin_lock(&dev_priv->irq_lock);
Egbert Eichb543fb02013-04-16 13:36:54 +02001342 for (i = 1; i < HPD_NUM_PINS; i++) {
Egbert Eich821450c2013-04-16 13:36:55 +02001343
Daniel Vetter3ff04a162014-04-24 12:03:17 +02001344 if (hpd[i] & hotplug_trigger &&
1345 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1346 /*
1347 * On GMCH platforms the interrupt mask bits only
1348 * prevent irq generation, not the setting of the
1349 * hotplug bits itself. So only WARN about unexpected
1350 * interrupts on saner platforms.
1351 */
1352 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1353 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1354 hotplug_trigger, i, hpd[i]);
1355
1356 continue;
1357 }
Egbert Eichb8f102e2013-07-26 14:14:24 +02001358
Egbert Eichb543fb02013-04-16 13:36:54 +02001359 if (!(hpd[i] & hotplug_trigger) ||
1360 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1361 continue;
1362
Jani Nikulabc5ead8c2013-05-07 15:10:29 +03001363 dev_priv->hpd_event_bits |= (1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001364 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1365 dev_priv->hpd_stats[i].hpd_last_jiffies
1366 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1367 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1368 dev_priv->hpd_stats[i].hpd_cnt = 0;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001369 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001370 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1371 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
Egbert Eich142e2392013-04-11 15:57:57 +02001372 dev_priv->hpd_event_bits &= ~(1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001373 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
Daniel Vetter10a504d2013-06-27 17:52:12 +02001374 storm_detected = true;
Egbert Eichb543fb02013-04-16 13:36:54 +02001375 } else {
1376 dev_priv->hpd_stats[i].hpd_cnt++;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001377 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1378 dev_priv->hpd_stats[i].hpd_cnt);
Egbert Eichb543fb02013-04-16 13:36:54 +02001379 }
1380 }
1381
Daniel Vetter10a504d2013-06-27 17:52:12 +02001382 if (storm_detected)
1383 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001384 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter5876fa02013-06-27 17:52:13 +02001385
Daniel Vetter645416f2013-09-02 16:22:25 +02001386 /*
1387 * Our hotplug handler can grab modeset locks (by calling down into the
1388 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1389 * queue for otherwise the flush_work in the pageflip code will
1390 * deadlock.
1391 */
1392 schedule_work(&dev_priv->hotplug_work);
Egbert Eichb543fb02013-04-16 13:36:54 +02001393}
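/*
 * Illustrative sketch, not driver code: the per-pin storm detection above
 * reduces to a simple windowed rate check. If more than
 * HPD_STORM_THRESHOLD interrupts land inside one HPD_STORM_DETECT_PERIOD
 * window the pin is treated as storming and should be masked. The struct
 * and helper names below are local to this example.
 */
struct hpd_pin_window {
	unsigned long start_jiffies;	/* start of the current window */
	int count;			/* interrupts seen in the window */
};

static bool hpd_pin_is_storming(struct hpd_pin_window *w)
{
	unsigned long period = msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);

	if (!time_in_range(jiffies, w->start_jiffies,
			   w->start_jiffies + period)) {
		/* Window expired: start counting again from this event. */
		w->start_jiffies = jiffies;
		w->count = 0;
		return false;
	}

	if (w->count > HPD_STORM_THRESHOLD)
		return true;	/* caller masks the pin and reruns hpd_irq_setup */

	w->count++;
	return false;
}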
1394
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001395static void gmbus_irq_handler(struct drm_device *dev)
1396{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001397 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter28c70f12012-12-01 13:53:45 +01001398
Daniel Vetter28c70f12012-12-01 13:53:45 +01001399 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001400}
1401
Daniel Vetterce99c252012-12-01 13:53:47 +01001402static void dp_aux_irq_handler(struct drm_device *dev)
1403{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001404 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001405
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001406 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetterce99c252012-12-01 13:53:47 +01001407}
1408
Shuang He8bf1e9f2013-10-15 18:55:27 +01001409#if defined(CONFIG_DEBUG_FS)
Daniel Vetter277de952013-10-18 16:37:07 +02001410static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1411 uint32_t crc0, uint32_t crc1,
1412 uint32_t crc2, uint32_t crc3,
1413 uint32_t crc4)
Shuang He8bf1e9f2013-10-15 18:55:27 +01001414{
1415 struct drm_i915_private *dev_priv = dev->dev_private;
1416 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1417 struct intel_pipe_crc_entry *entry;
Damien Lespiauac2300d2013-10-15 18:55:30 +01001418 int head, tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001419
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001420 spin_lock(&pipe_crc->lock);
1421
Damien Lespiau0c912c72013-10-15 18:55:37 +01001422 if (!pipe_crc->entries) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001423 spin_unlock(&pipe_crc->lock);
Damien Lespiau0c912c72013-10-15 18:55:37 +01001424 DRM_ERROR("spurious interrupt\n");
1425 return;
1426 }
1427
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001428 head = pipe_crc->head;
1429 tail = pipe_crc->tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001430
1431 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001432 spin_unlock(&pipe_crc->lock);
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001433 DRM_ERROR("CRC buffer overflowing\n");
1434 return;
1435 }
1436
1437 entry = &pipe_crc->entries[head];
Shuang He8bf1e9f2013-10-15 18:55:27 +01001438
Daniel Vetter8bc5e952013-10-16 22:55:49 +02001439 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
Daniel Vettereba94eb2013-10-16 22:55:46 +02001440 entry->crc[0] = crc0;
1441 entry->crc[1] = crc1;
1442 entry->crc[2] = crc2;
1443 entry->crc[3] = crc3;
1444 entry->crc[4] = crc4;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001445
1446 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001447 pipe_crc->head = head;
1448
1449 spin_unlock(&pipe_crc->lock);
Damien Lespiau07144422013-10-15 18:55:40 +01001450
1451 wake_up_interruptible(&pipe_crc->wq);
Shuang He8bf1e9f2013-10-15 18:55:27 +01001452}
Daniel Vetter277de952013-10-18 16:37:07 +02001453#else
1454static inline void
1455display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1456 uint32_t crc0, uint32_t crc1,
1457 uint32_t crc2, uint32_t crc3,
1458 uint32_t crc4) {}
1459#endif
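/*
 * Illustrative sketch, not driver code: the function above is the producer
 * half of a power-of-two ring using the linux/circ_buf.h conventions (head
 * advanced by the interrupt, tail by the reader). A consumer such as the
 * debugfs read path (not shown in this excerpt) would drain entries roughly
 * like this; the helper name and the use of spin_lock_irq() are assumptions
 * made for the example.
 */
static bool pipe_crc_pop_entry(struct intel_pipe_crc *pipe_crc,
			       struct intel_pipe_crc_entry *out)
{
	bool popped = false;

	spin_lock_irq(&pipe_crc->lock);
	if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
		     INTEL_PIPE_CRC_ENTRIES_NR) >= 1) {
		*out = pipe_crc->entries[pipe_crc->tail];
		pipe_crc->tail = (pipe_crc->tail + 1) &
				 (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		popped = true;
	}
	spin_unlock_irq(&pipe_crc->lock);

	return popped;
}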
Daniel Vettereba94eb2013-10-16 22:55:46 +02001460
Daniel Vetter277de952013-10-18 16:37:07 +02001461
1462static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5a69b892013-10-16 22:55:52 +02001463{
1464 struct drm_i915_private *dev_priv = dev->dev_private;
1465
Daniel Vetter277de952013-10-18 16:37:07 +02001466 display_pipe_crc_irq_handler(dev, pipe,
1467 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1468 0, 0, 0, 0);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001469}
1470
Daniel Vetter277de952013-10-18 16:37:07 +02001471static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vettereba94eb2013-10-16 22:55:46 +02001472{
1473 struct drm_i915_private *dev_priv = dev->dev_private;
1474
Daniel Vetter277de952013-10-18 16:37:07 +02001475 display_pipe_crc_irq_handler(dev, pipe,
1476 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1477 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1478 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1479 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1480 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
Daniel Vettereba94eb2013-10-16 22:55:46 +02001481}
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001482
Daniel Vetter277de952013-10-18 16:37:07 +02001483static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001484{
1485 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001486 uint32_t res1, res2;
1487
1488 if (INTEL_INFO(dev)->gen >= 3)
1489 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1490 else
1491 res1 = 0;
1492
1493 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1494 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1495 else
1496 res2 = 0;
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001497
Daniel Vetter277de952013-10-18 16:37:07 +02001498 display_pipe_crc_irq_handler(dev, pipe,
1499 I915_READ(PIPE_CRC_RES_RED(pipe)),
1500 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1501 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1502 res1, res2);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001503}
Shuang He8bf1e9f2013-10-15 18:55:27 +01001504
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001505/* The RPS events need forcewake, so we add them to a work queue and mask their
1506 * IMR bits until the work is done. Other interrupts can be processed without
1507 * the work queue. */
1508static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
Ben Widawskybaf02a12013-05-28 19:22:24 -07001509{
Deepak Sa6706b42014-03-15 20:23:22 +05301510 if (pm_iir & dev_priv->pm_rps_events) {
Daniel Vetter59cdb632013-07-04 23:35:28 +02001511 spin_lock(&dev_priv->irq_lock);
Deepak Sa6706b42014-03-15 20:23:22 +05301512 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1513 snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
Daniel Vetter59cdb632013-07-04 23:35:28 +02001514 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter2adbee62013-07-04 23:35:27 +02001515
1516 queue_work(dev_priv->wq, &dev_priv->rps.work);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001517 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001518
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001519 if (HAS_VEBOX(dev_priv->dev)) {
1520 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1521 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
Ben Widawsky12638c52013-05-28 19:22:31 -07001522
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001523 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
Mika Kuoppala58174462014-02-25 17:11:26 +02001524 i915_handle_error(dev_priv->dev, false,
1525 "VEBOX CS error interrupt 0x%08x",
1526 pm_iir);
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001527 }
Ben Widawsky12638c52013-05-28 19:22:31 -07001528 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001529}
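/*
 * Illustrative sketch, not driver code: the handler above only latches the
 * RPS bits and masks them in the IMR; the expensive part runs later in
 * process context. The real work item (gen6_pm_rps_work, defined elsewhere
 * in this file) follows roughly this shape; the snb_enable_pm_irq() call
 * and the simplified locking are assumptions for the example.
 */
static void rps_work_sketch(struct drm_i915_private *dev_priv)
{
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Unmask the RPS interrupts again now that we are about to service them. */
	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	if (!(pm_iir & dev_priv->pm_rps_events))
		return;

	/* Evaluate the up/down thresholds and reprogram the RPS frequency here. */
}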
1530
Imre Deakc1874ed2014-02-04 21:35:46 +02001531static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1532{
1533 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak91d181d2014-02-10 18:42:49 +02001534 u32 pipe_stats[I915_MAX_PIPES] = { };
Imre Deakc1874ed2014-02-04 21:35:46 +02001535 int pipe;
1536
Imre Deak58ead0d2014-02-04 21:35:47 +02001537 spin_lock(&dev_priv->irq_lock);
Imre Deakc1874ed2014-02-04 21:35:46 +02001538 for_each_pipe(pipe) {
Imre Deak91d181d2014-02-10 18:42:49 +02001539 int reg;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001540 u32 mask, iir_bit = 0;
Imre Deak91d181d2014-02-10 18:42:49 +02001541
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001542 /*
1543 * PIPESTAT bits get signalled even when the interrupt is
1544 * disabled with the mask bits, and some of the status bits do
1545 * not generate interrupts at all (like the underrun bit). Hence
1546 * we need to be careful that we only handle what we want to
1547 * handle.
1548 */
1549 mask = 0;
1550 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
1551 mask |= PIPE_FIFO_UNDERRUN_STATUS;
1552
1553 switch (pipe) {
1554 case PIPE_A:
1555 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1556 break;
1557 case PIPE_B:
1558 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1559 break;
1560 }
1561 if (iir & iir_bit)
1562 mask |= dev_priv->pipestat_irq_mask[pipe];
1563
1564 if (!mask)
Imre Deak91d181d2014-02-10 18:42:49 +02001565 continue;
1566
1567 reg = PIPESTAT(pipe);
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001568 mask |= PIPESTAT_INT_ENABLE_MASK;
1569 pipe_stats[pipe] = I915_READ(reg) & mask;
Imre Deakc1874ed2014-02-04 21:35:46 +02001570
1571 /*
1572 * Clear the PIPE*STAT regs before the IIR
1573 */
Imre Deak91d181d2014-02-10 18:42:49 +02001574 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1575 PIPESTAT_INT_STATUS_MASK))
Imre Deakc1874ed2014-02-04 21:35:46 +02001576 I915_WRITE(reg, pipe_stats[pipe]);
1577 }
Imre Deak58ead0d2014-02-04 21:35:47 +02001578 spin_unlock(&dev_priv->irq_lock);
Imre Deakc1874ed2014-02-04 21:35:46 +02001579
1580 for_each_pipe(pipe) {
1581 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1582 drm_handle_vblank(dev, pipe);
1583
Imre Deak579a9b02014-02-04 21:35:48 +02001584 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
Imre Deakc1874ed2014-02-04 21:35:46 +02001585 intel_prepare_page_flip(dev, pipe);
1586 intel_finish_page_flip(dev, pipe);
1587 }
1588
1589 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1590 i9xx_pipe_crc_irq_handler(dev, pipe);
1591
1592 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
1593 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1594 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
1595 }
1596
1597 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1598 gmbus_irq_handler(dev);
1599}
1600
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001601static void i9xx_hpd_irq_handler(struct drm_device *dev)
1602{
1603 struct drm_i915_private *dev_priv = dev->dev_private;
1604 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1605
1606 if (IS_G4X(dev)) {
1607 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1608
1609 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
1610 } else {
1611 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1612
1613 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1614 }
1615
1616 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1617 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1618 dp_aux_irq_handler(dev);
1619
1620 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1621 /*
1622 * Make sure hotplug status is cleared before we clear IIR, or else we
1623 * may miss hotplug events.
1624 */
1625 POSTING_READ(PORT_HOTPLUG_STAT);
1626}
1627
Daniel Vetterff1f5252012-10-02 15:10:55 +02001628static irqreturn_t valleyview_irq_handler(int irq, void *arg)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001629{
1630 struct drm_device *dev = (struct drm_device *) arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03001631 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001632 u32 iir, gt_iir, pm_iir;
1633 irqreturn_t ret = IRQ_NONE;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001634
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001635 while (true) {
1636 iir = I915_READ(VLV_IIR);
1637 gt_iir = I915_READ(GTIIR);
1638 pm_iir = I915_READ(GEN6_PMIIR);
1639
1640 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1641 goto out;
1642
1643 ret = IRQ_HANDLED;
1644
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001645 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001646
Imre Deakc1874ed2014-02-04 21:35:46 +02001647 valleyview_pipestat_irq_handler(dev, iir);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001648
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001649 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001650 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1651 i9xx_hpd_irq_handler(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001652
Paulo Zanoni60611c12013-08-15 11:50:01 -03001653 if (pm_iir)
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001654 gen6_rps_irq_handler(dev_priv, pm_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001655
1656 I915_WRITE(GTIIR, gt_iir);
1657 I915_WRITE(GEN6_PMIIR, pm_iir);
1658 I915_WRITE(VLV_IIR, iir);
1659 }
1660
1661out:
1662 return ret;
1663}
1664
Adam Jackson23e81d62012-06-06 15:45:44 -04001665static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08001666{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001667 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001668 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001669 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Jesse Barnes776ad802011-01-04 15:09:39 -08001670
Daniel Vetter91d131d2013-06-27 17:52:14 +02001671 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1672
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001673 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1674 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1675 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08001676 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001677 port_name(port));
1678 }
Jesse Barnes776ad802011-01-04 15:09:39 -08001679
Daniel Vetterce99c252012-12-01 13:53:47 +01001680 if (pch_iir & SDE_AUX_MASK)
1681 dp_aux_irq_handler(dev);
1682
Jesse Barnes776ad802011-01-04 15:09:39 -08001683 if (pch_iir & SDE_GMBUS)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001684 gmbus_irq_handler(dev);
Jesse Barnes776ad802011-01-04 15:09:39 -08001685
1686 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1687 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1688
1689 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1690 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1691
1692 if (pch_iir & SDE_POISON)
1693 DRM_ERROR("PCH poison interrupt\n");
1694
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001695 if (pch_iir & SDE_FDI_MASK)
1696 for_each_pipe(pipe)
1697 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1698 pipe_name(pipe),
1699 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08001700
1701 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1702 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1703
1704 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1705 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1706
Jesse Barnes776ad802011-01-04 15:09:39 -08001707 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Paulo Zanoni86642812013-04-12 17:57:57 -03001708 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1709 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001710 DRM_ERROR("PCH transcoder A FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001711
1712 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1713 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1714 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001715 DRM_ERROR("PCH transcoder B FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001716}
1717
1718static void ivb_err_int_handler(struct drm_device *dev)
1719{
1720 struct drm_i915_private *dev_priv = dev->dev_private;
1721 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001722 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03001723
Paulo Zanonide032bf2013-04-12 17:57:58 -03001724 if (err_int & ERR_INT_POISON)
1725 DRM_ERROR("Poison interrupt\n");
1726
Daniel Vetter5a69b892013-10-16 22:55:52 +02001727 for_each_pipe(pipe) {
1728 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1729 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1730 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001731 DRM_ERROR("Pipe %c FIFO underrun\n",
1732 pipe_name(pipe));
Daniel Vetter5a69b892013-10-16 22:55:52 +02001733 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001734
Daniel Vetter5a69b892013-10-16 22:55:52 +02001735 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1736 if (IS_IVYBRIDGE(dev))
Daniel Vetter277de952013-10-18 16:37:07 +02001737 ivb_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001738 else
Daniel Vetter277de952013-10-18 16:37:07 +02001739 hsw_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001740 }
1741 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01001742
Paulo Zanoni86642812013-04-12 17:57:57 -03001743 I915_WRITE(GEN7_ERR_INT, err_int);
1744}
1745
1746static void cpt_serr_int_handler(struct drm_device *dev)
1747{
1748 struct drm_i915_private *dev_priv = dev->dev_private;
1749 u32 serr_int = I915_READ(SERR_INT);
1750
Paulo Zanonide032bf2013-04-12 17:57:58 -03001751 if (serr_int & SERR_INT_POISON)
1752 DRM_ERROR("PCH poison interrupt\n");
1753
Paulo Zanoni86642812013-04-12 17:57:57 -03001754 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1755 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1756 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001757 DRM_ERROR("PCH transcoder A FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001758
1759 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1760 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1761 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001762 DRM_ERROR("PCH transcoder B FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001763
1764 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1765 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1766 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001767 DRM_ERROR("PCH transcoder C FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001768
1769 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08001770}
1771
Adam Jackson23e81d62012-06-06 15:45:44 -04001772static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1773{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001774 struct drm_i915_private *dev_priv = dev->dev_private;
Adam Jackson23e81d62012-06-06 15:45:44 -04001775 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001776 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04001777
Daniel Vetter91d131d2013-06-27 17:52:14 +02001778 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1779
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001780 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1781 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1782 SDE_AUDIO_POWER_SHIFT_CPT);
1783 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1784 port_name(port));
1785 }
Adam Jackson23e81d62012-06-06 15:45:44 -04001786
1787 if (pch_iir & SDE_AUX_MASK_CPT)
Daniel Vetterce99c252012-12-01 13:53:47 +01001788 dp_aux_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001789
1790 if (pch_iir & SDE_GMBUS_CPT)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001791 gmbus_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001792
1793 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1794 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1795
1796 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1797 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1798
1799 if (pch_iir & SDE_FDI_MASK_CPT)
1800 for_each_pipe(pipe)
1801 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1802 pipe_name(pipe),
1803 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03001804
1805 if (pch_iir & SDE_ERROR_CPT)
1806 cpt_serr_int_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001807}
1808
Paulo Zanonic008bc62013-07-12 16:35:10 -03001809static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1810{
1811 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter40da17c2013-10-21 18:04:36 +02001812 enum pipe pipe;
Paulo Zanonic008bc62013-07-12 16:35:10 -03001813
1814 if (de_iir & DE_AUX_CHANNEL_A)
1815 dp_aux_irq_handler(dev);
1816
1817 if (de_iir & DE_GSE)
1818 intel_opregion_asle_intr(dev);
1819
Paulo Zanonic008bc62013-07-12 16:35:10 -03001820 if (de_iir & DE_POISON)
1821 DRM_ERROR("Poison interrupt\n");
1822
Daniel Vetter40da17c2013-10-21 18:04:36 +02001823 for_each_pipe(pipe) {
1824 if (de_iir & DE_PIPE_VBLANK(pipe))
1825 drm_handle_vblank(dev, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03001826
Daniel Vetter40da17c2013-10-21 18:04:36 +02001827 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1828 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001829 DRM_ERROR("Pipe %c FIFO underrun\n",
1830 pipe_name(pipe));
Paulo Zanonic008bc62013-07-12 16:35:10 -03001831
Daniel Vetter40da17c2013-10-21 18:04:36 +02001832 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1833 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001834
Daniel Vetter40da17c2013-10-21 18:04:36 +02001835 /* plane/pipes map 1:1 on ilk+ */
1836 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1837 intel_prepare_page_flip(dev, pipe);
1838 intel_finish_page_flip_plane(dev, pipe);
1839 }
Paulo Zanonic008bc62013-07-12 16:35:10 -03001840 }
1841
1842 /* check event from PCH */
1843 if (de_iir & DE_PCH_EVENT) {
1844 u32 pch_iir = I915_READ(SDEIIR);
1845
1846 if (HAS_PCH_CPT(dev))
1847 cpt_irq_handler(dev, pch_iir);
1848 else
1849 ibx_irq_handler(dev, pch_iir);
1850
1851 /* should clear PCH hotplug event before clear CPU irq */
1852 I915_WRITE(SDEIIR, pch_iir);
1853 }
1854
1855 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1856 ironlake_rps_change_irq_handler(dev);
1857}
1858
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001859static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1860{
1861 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau07d27e22014-03-03 17:31:46 +00001862 enum pipe pipe;
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001863
1864 if (de_iir & DE_ERR_INT_IVB)
1865 ivb_err_int_handler(dev);
1866
1867 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1868 dp_aux_irq_handler(dev);
1869
1870 if (de_iir & DE_GSE_IVB)
1871 intel_opregion_asle_intr(dev);
1872
Damien Lespiau07d27e22014-03-03 17:31:46 +00001873 for_each_pipe(pipe) {
1874 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
1875 drm_handle_vblank(dev, pipe);
Daniel Vetter40da17c2013-10-21 18:04:36 +02001876
1877 /* plane/pipes map 1:1 on ilk+ */
Damien Lespiau07d27e22014-03-03 17:31:46 +00001878 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1879 intel_prepare_page_flip(dev, pipe);
1880 intel_finish_page_flip_plane(dev, pipe);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001881 }
1882 }
1883
1884 /* check event from PCH */
1885 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1886 u32 pch_iir = I915_READ(SDEIIR);
1887
1888 cpt_irq_handler(dev, pch_iir);
1889
1890 /* clear PCH hotplug event before clear CPU irq */
1891 I915_WRITE(SDEIIR, pch_iir);
1892 }
1893}
1894
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001895static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001896{
1897 struct drm_device *dev = (struct drm_device *) arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03001898 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001899 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01001900 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001901
Paulo Zanoni86642812013-04-12 17:57:57 -03001902 /* We get interrupts on unclaimed registers, so check for this before we
1903 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01001904 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03001905
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001906 /* disable master interrupt before clearing iir */
1907 de_ier = I915_READ(DEIER);
1908 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03001909 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01001910
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001911 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1912 * interrupts will be stored on its back queue, and then we'll be
1913 * able to process them after we restore SDEIER (as soon as we restore
1914 * it, we'll get an interrupt if SDEIIR still has something to process
1915 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07001916 if (!HAS_PCH_NOP(dev)) {
1917 sde_ier = I915_READ(SDEIER);
1918 I915_WRITE(SDEIER, 0);
1919 POSTING_READ(SDEIER);
1920 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001921
Chris Wilson0e434062012-05-09 21:45:44 +01001922 gt_iir = I915_READ(GTIIR);
1923 if (gt_iir) {
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001924 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001925 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001926 else
1927 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001928 I915_WRITE(GTIIR, gt_iir);
1929 ret = IRQ_HANDLED;
1930 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001931
1932 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01001933 if (de_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001934 if (INTEL_INFO(dev)->gen >= 7)
1935 ivb_display_irq_handler(dev, de_iir);
1936 else
1937 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001938 I915_WRITE(DEIIR, de_iir);
1939 ret = IRQ_HANDLED;
1940 }
1941
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001942 if (INTEL_INFO(dev)->gen >= 6) {
1943 u32 pm_iir = I915_READ(GEN6_PMIIR);
1944 if (pm_iir) {
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001945 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001946 I915_WRITE(GEN6_PMIIR, pm_iir);
1947 ret = IRQ_HANDLED;
1948 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001949 }
1950
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001951 I915_WRITE(DEIER, de_ier);
1952 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07001953 if (!HAS_PCH_NOP(dev)) {
1954 I915_WRITE(SDEIER, sde_ier);
1955 POSTING_READ(SDEIER);
1956 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001957
1958 return ret;
1959}
1960
Ben Widawskyabd58f02013-11-02 21:07:09 -07001961static irqreturn_t gen8_irq_handler(int irq, void *arg)
1962{
1963 struct drm_device *dev = arg;
1964 struct drm_i915_private *dev_priv = dev->dev_private;
1965 u32 master_ctl;
1966 irqreturn_t ret = IRQ_NONE;
1967 uint32_t tmp = 0;
Daniel Vetterc42664c2013-11-07 11:05:40 +01001968 enum pipe pipe;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001969
Ben Widawskyabd58f02013-11-02 21:07:09 -07001970 master_ctl = I915_READ(GEN8_MASTER_IRQ);
1971 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1972 if (!master_ctl)
1973 return IRQ_NONE;
1974
1975 I915_WRITE(GEN8_MASTER_IRQ, 0);
1976 POSTING_READ(GEN8_MASTER_IRQ);
1977
1978 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1979
1980 if (master_ctl & GEN8_DE_MISC_IRQ) {
1981 tmp = I915_READ(GEN8_DE_MISC_IIR);
1982 if (tmp & GEN8_DE_MISC_GSE)
1983 intel_opregion_asle_intr(dev);
1984 else if (tmp)
1985 DRM_ERROR("Unexpected DE Misc interrupt\n");
1986 else
1987 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
1988
1989 if (tmp) {
1990 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
1991 ret = IRQ_HANDLED;
1992 }
1993 }
1994
Daniel Vetter6d766f02013-11-07 14:49:55 +01001995 if (master_ctl & GEN8_DE_PORT_IRQ) {
1996 tmp = I915_READ(GEN8_DE_PORT_IIR);
1997 if (tmp & GEN8_AUX_CHANNEL_A)
1998 dp_aux_irq_handler(dev);
1999 else if (tmp)
2000 DRM_ERROR("Unexpected DE Port interrupt\n");
2001 else
2002 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2003
2004 if (tmp) {
2005 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2006 ret = IRQ_HANDLED;
2007 }
2008 }
2009
Daniel Vetterc42664c2013-11-07 11:05:40 +01002010 for_each_pipe(pipe) {
2011 uint32_t pipe_iir;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002012
Daniel Vetterc42664c2013-11-07 11:05:40 +01002013 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2014 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002015
Daniel Vetterc42664c2013-11-07 11:05:40 +01002016 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2017 if (pipe_iir & GEN8_PIPE_VBLANK)
2018 drm_handle_vblank(dev, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002019
Damien Lespiaud0e1f1c2014-04-08 01:22:44 +01002020 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
Daniel Vetterc42664c2013-11-07 11:05:40 +01002021 intel_prepare_page_flip(dev, pipe);
2022 intel_finish_page_flip_plane(dev, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002023 }
Daniel Vetterc42664c2013-11-07 11:05:40 +01002024
Daniel Vetter0fbe7872013-11-07 11:05:44 +01002025 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2026 hsw_pipe_crc_irq_handler(dev, pipe);
2027
Daniel Vetter38d83c962013-11-07 11:05:46 +01002028 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2029 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2030 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02002031 DRM_ERROR("Pipe %c FIFO underrun\n",
2032 pipe_name(pipe));
Daniel Vetter38d83c962013-11-07 11:05:46 +01002033 }
2034
Daniel Vetter30100f22013-11-07 14:49:24 +01002035 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2036 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2037 pipe_name(pipe),
2038 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2039 }
Daniel Vetterc42664c2013-11-07 11:05:40 +01002040
2041 if (pipe_iir) {
2042 ret = IRQ_HANDLED;
2043 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2044 } else
Ben Widawskyabd58f02013-11-02 21:07:09 -07002045 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2046 }
2047
Daniel Vetter92d03a82013-11-07 11:05:43 +01002048 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2049 /*
2050 * FIXME(BDW): Assume for now that the new interrupt handling
2051 * scheme also closed the SDE interrupt handling race we've seen
2052 * on older pch-split platforms. But this needs testing.
2053 */
2054 u32 pch_iir = I915_READ(SDEIIR);
2055
2056 cpt_irq_handler(dev, pch_iir);
2057
2058 if (pch_iir) {
2059 I915_WRITE(SDEIIR, pch_iir);
2060 ret = IRQ_HANDLED;
2061 }
2062 }
2063
Ben Widawskyabd58f02013-11-02 21:07:09 -07002064 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2065 POSTING_READ(GEN8_MASTER_IRQ);
2066
2067 return ret;
2068}
2069
Daniel Vetter17e1df02013-09-08 21:57:13 +02002070static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2071 bool reset_completed)
2072{
2073 struct intel_ring_buffer *ring;
2074 int i;
2075
2076 /*
2077 * Notify all waiters for GPU completion events that reset state has
2078 * been changed, and that they need to restart their wait after
2079 * checking for potential errors (and bail out to drop locks if there is
2080 * a gpu reset pending so that i915_error_work_func can acquire them).
2081 */
2082
2083 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2084 for_each_ring(ring, dev_priv, i)
2085 wake_up_all(&ring->irq_queue);
2086
2087 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2088 wake_up_all(&dev_priv->pending_flip_queue);
2089
2090 /*
2091 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2092 * reset state is cleared.
2093 */
2094 if (reset_completed)
2095 wake_up_all(&dev_priv->gpu_error.reset_queue);
2096}
2097
Jesse Barnes8a905232009-07-11 16:48:03 -04002098/**
2099 * i915_error_work_func - do process context error handling work
2100 * @work: work struct
2101 *
2102 * Fire an error uevent so userspace can see that a hang or error
2103 * was detected.
2104 */
2105static void i915_error_work_func(struct work_struct *work)
2106{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002107 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2108 work);
Jani Nikula2d1013d2014-03-31 14:27:17 +03002109 struct drm_i915_private *dev_priv =
2110 container_of(error, struct drm_i915_private, gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04002111 struct drm_device *dev = dev_priv->dev;
Ben Widawskycce723e2013-07-19 09:16:42 -07002112 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2113 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2114 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetter17e1df02013-09-08 21:57:13 +02002115 int ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04002116
Dave Airlie5bdebb12013-10-11 14:07:25 +10002117 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002118
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002119 /*
2120 * Note that there's only one work item which does gpu resets, so we
2121 * need not worry about concurrent gpu resets potentially incrementing
2122 * error->reset_counter twice. We only need to take care of another
2123 * racing irq/hangcheck declaring the gpu dead for a second time. A
2124 * quick check for that is good enough: schedule_work ensures the
2125 * correct ordering between hang detection and this work item, and since
2126 * the reset in-progress bit is only ever set by code outside of this
2127 * work we don't need to worry about any other races.
2128 */
2129 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01002130 DRM_DEBUG_DRIVER("resetting chip\n");
Dave Airlie5bdebb12013-10-11 14:07:25 +10002131 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002132 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002133
Daniel Vetter17e1df02013-09-08 21:57:13 +02002134 /*
Imre Deakf454c692014-04-23 01:09:04 +03002135 * In most cases it's guaranteed that we get here with an RPM
2136 * reference held, for example because there is a pending GPU
2137 * request that won't finish until the reset is done. This
2138 * isn't the case at least when we get here by doing a
2139 * simulated reset via debugfs, so get an RPM reference.
2140 */
2141 intel_runtime_pm_get(dev_priv);
2142 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002143 * All state reset _must_ be completed before we update the
2144 * reset counter, for otherwise waiters might miss the reset
2145 * pending state and not properly drop locks, resulting in
2146 * deadlocks with the reset work.
2147 */
Daniel Vetterf69061b2012-12-06 09:01:42 +01002148 ret = i915_reset(dev);
2149
Daniel Vetter17e1df02013-09-08 21:57:13 +02002150 intel_display_handle_reset(dev);
2151
Imre Deakf454c692014-04-23 01:09:04 +03002152 intel_runtime_pm_put(dev_priv);
2153
Daniel Vetterf69061b2012-12-06 09:01:42 +01002154 if (ret == 0) {
2155 /*
2156 * After all the gem state is reset, increment the reset
2157 * counter and wake up everyone waiting for the reset to
2158 * complete.
2159 *
2160 * Since unlock operations are a one-sided barrier only,
2161 * we need to insert a barrier here to order any seqno
2162 * updates before
2163 * the counter increment.
2164 */
2165 smp_mb__before_atomic_inc();
2166 atomic_inc(&dev_priv->gpu_error.reset_counter);
2167
Dave Airlie5bdebb12013-10-11 14:07:25 +10002168 kobject_uevent_env(&dev->primary->kdev->kobj,
Daniel Vetterf69061b2012-12-06 09:01:42 +01002169 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002170 } else {
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02002171 atomic_set_mask(I915_WEDGED, &error->reset_counter);
Ben Gamarif316a422009-09-14 17:48:46 -04002172 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002173
Daniel Vetter17e1df02013-09-08 21:57:13 +02002174 /*
2175 * Note: The wake_up also serves as a memory barrier so that
2176 * waiters see the updated value of the reset counter atomic_t.
2177 */
2178 i915_error_wake_up(dev_priv, true);
Ben Gamarif316a422009-09-14 17:48:46 -04002179 }
Jesse Barnes8a905232009-07-11 16:48:03 -04002180}
2181
Chris Wilson35aed2e2010-05-27 13:18:12 +01002182static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04002183{
2184 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07002185 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04002186 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07002187 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04002188
Chris Wilson35aed2e2010-05-27 13:18:12 +01002189 if (!eir)
2190 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04002191
Joe Perchesa70491c2012-03-18 13:00:11 -07002192 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002193
Ben Widawskybd9854f2012-08-23 15:18:09 -07002194 i915_get_extra_instdone(dev, instdone);
2195
Jesse Barnes8a905232009-07-11 16:48:03 -04002196 if (IS_G4X(dev)) {
2197 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2198 u32 ipeir = I915_READ(IPEIR_I965);
2199
Joe Perchesa70491c2012-03-18 13:00:11 -07002200 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2201 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002202 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2203 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002204 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002205 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002206 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002207 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002208 }
2209 if (eir & GM45_ERROR_PAGE_TABLE) {
2210 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002211 pr_err("page table error\n");
2212 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002213 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002214 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002215 }
2216 }
2217
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002218 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002219 if (eir & I915_ERROR_PAGE_TABLE) {
2220 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002221 pr_err("page table error\n");
2222 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002223 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002224 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002225 }
2226 }
2227
2228 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002229 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002230 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002231 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002232 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002233 /* pipestat has already been acked */
2234 }
2235 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002236 pr_err("instruction error\n");
2237 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002238 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2239 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002240 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002241 u32 ipeir = I915_READ(IPEIR);
2242
Joe Perchesa70491c2012-03-18 13:00:11 -07002243 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2244 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002245 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002246 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002247 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002248 } else {
2249 u32 ipeir = I915_READ(IPEIR_I965);
2250
Joe Perchesa70491c2012-03-18 13:00:11 -07002251 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2252 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002253 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002254 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002255 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002256 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002257 }
2258 }
2259
2260 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002261 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002262 eir = I915_READ(EIR);
2263 if (eir) {
2264 /*
2265 * some errors might have become stuck,
2266 * mask them.
2267 */
2268 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2269 I915_WRITE(EMR, I915_READ(EMR) | eir);
2270 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2271 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002272}
2273
2274/**
2275 * i915_handle_error - handle an error interrupt
2276 * @dev: drm device
 * @wedged: whether the error should also mark the GPU as hung and kick off
 * the reset work
 * @fmt: printf-style format string (plus arguments) describing the error
2277 *
2278 * Do some basic checking of register state at error interrupt time and
2279 * dump it to the syslog. Also call i915_capture_error_state() to make
2280 * sure we get a record and make it available in debugfs. Fire a uevent
2281 * so userspace knows something bad happened (should trigger collection
2282 * of a ring dump etc.).
2283 */
Mika Kuoppala58174462014-02-25 17:11:26 +02002284void i915_handle_error(struct drm_device *dev, bool wedged,
2285 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002286{
2287 struct drm_i915_private *dev_priv = dev->dev_private;
Mika Kuoppala58174462014-02-25 17:11:26 +02002288 va_list args;
2289 char error_msg[80];
Chris Wilson35aed2e2010-05-27 13:18:12 +01002290
Mika Kuoppala58174462014-02-25 17:11:26 +02002291 va_start(args, fmt);
2292 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2293 va_end(args);
2294
2295 i915_capture_error_state(dev, wedged, error_msg);
Chris Wilson35aed2e2010-05-27 13:18:12 +01002296 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002297
Ben Gamariba1234d2009-09-14 17:48:47 -04002298 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01002299 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2300 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002301
Ben Gamari11ed50e2009-09-14 17:48:45 -04002302 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002303 * Wakeup waiting processes so that the reset work function
2304 * i915_error_work_func doesn't deadlock trying to grab various
2305 * locks. By bumping the reset counter first, the woken
2306 * processes will see a reset in progress and back off,
2307 * releasing their locks and then wait for the reset completion.
2308 * We must do this for _all_ gpu waiters that might hold locks
2309 * that the reset work needs to acquire.
2310 *
2311 * Note: The wake_up serves as the required memory barrier to
2312 * ensure that the waiters see the updated value of the reset
2313 * counter atomic_t.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002314 */
Daniel Vetter17e1df02013-09-08 21:57:13 +02002315 i915_error_wake_up(dev_priv, false);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002316 }
2317
Daniel Vetter122f46b2013-09-04 17:36:14 +02002318 /*
2319 * Our reset work can grab modeset locks (since it needs to reset the
2320 * state of outstanding pageflips). Hence it must not be run on our own
2321 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2322 * code will deadlock.
2323 */
2324 schedule_work(&dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04002325}
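/*
 * Illustrative sketch, not driver code: how a waiter typically reacts to
 * the reset counter bumped above. It samples the counter before sleeping
 * and restarts (or gives up) when a reset was declared in the meantime.
 * The helper name and the -EAGAIN/-EIO convention mirror the GEM wait
 * paths but are assumptions for this example.
 */
static int reset_check_sketch(struct i915_gpu_error *error,
			      unsigned int pre_reset_counter)
{
	if (i915_reset_in_progress(error))
		return i915_terminally_wedged(error) ? -EIO : -EAGAIN;

	/* A reset completed since we sampled the counter: restart the wait. */
	if (pre_reset_counter != atomic_read(&error->reset_counter))
		return -EAGAIN;

	return 0;
}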
2326
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002327static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002328{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002329 struct drm_i915_private *dev_priv = dev->dev_private;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002330 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2331 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00002332 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002333 struct intel_unpin_work *work;
2334 unsigned long flags;
2335 bool stall_detected;
2336
2337 /* Ignore early vblank irqs */
2338 if (intel_crtc == NULL)
2339 return;
2340
2341 spin_lock_irqsave(&dev->event_lock, flags);
2342 work = intel_crtc->unpin_work;
2343
Chris Wilsone7d841c2012-12-03 11:36:30 +00002344 if (work == NULL ||
2345 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2346 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002347 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2348 spin_unlock_irqrestore(&dev->event_lock, flags);
2349 return;
2350 }
2351
2352 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00002353 obj = work->pending_flip_obj;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002354 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002355 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07002356 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002357 i915_gem_obj_ggtt_offset(obj);
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002358 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002359 int dspaddr = DSPADDR(intel_crtc->plane);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002360 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
Matt Roperf4510a22014-04-01 15:22:40 -07002361 crtc->y * crtc->primary->fb->pitches[0] +
2362 crtc->x * crtc->primary->fb->bits_per_pixel/8);
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002363 }
2364
2365 spin_unlock_irqrestore(&dev->event_lock, flags);
2366
2367 if (stall_detected) {
2368 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2369 intel_prepare_page_flip(dev, intel_crtc->plane);
2370 }
2371}
2372
Keith Packard42f52ef2008-10-18 19:39:29 -07002373/* Called from drm generic code, passed 'crtc' which
2374 * we use as a pipe index
2375 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002376static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002377{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002378 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002379 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002380
Chris Wilson5eddb702010-09-11 13:48:45 +01002381 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002382 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002383
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002384 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002385 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002386 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002387 PIPE_START_VBLANK_INTERRUPT_STATUS);
Keith Packarde9d21d72008-10-16 11:31:38 -07002388 else
Keith Packard7c463582008-11-04 02:03:27 -08002389 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002390 PIPE_VBLANK_INTERRUPT_STATUS);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002391
2392 /* maintain vblank delivery even in deep C-states */
Damien Lespiau3d13ef22014-02-07 19:12:47 +00002393 if (INTEL_INFO(dev)->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002394 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002395 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002396
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002397 return 0;
2398}
2399
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002400static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002401{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002402 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002403 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002404 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002405 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002406
2407 if (!i915_pipe_enabled(dev, pipe))
2408 return -EINVAL;
2409
2410 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002411 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002412 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2413
2414 return 0;
2415}
2416
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002417static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2418{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002419 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002420 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002421
2422 if (!i915_pipe_enabled(dev, pipe))
2423 return -EINVAL;
2424
2425 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002426 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002427 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002428 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2429
2430 return 0;
2431}
2432
Ben Widawskyabd58f02013-11-02 21:07:09 -07002433static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2434{
2435 struct drm_i915_private *dev_priv = dev->dev_private;
2436 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002437
2438 if (!i915_pipe_enabled(dev, pipe))
2439 return -EINVAL;
2440
2441 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002442 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2443 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2444 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002445 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2446 return 0;
2447}
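
/*
 * The *_enable_vblank() variants above (and the matching
 * *_disable_vblank() ones below) are the drm_driver vblank hooks that
 * the DRM core calls from drm_vblank_get()/drm_vblank_put(), passing
 * the pipe index as the 'crtc' argument. The per-platform assignment
 * happens elsewhere in this file (intel_irq_init()); as a rough sketch,
 * not a verbatim quote:
 *
 *	dev->driver->enable_vblank = valleyview_enable_vblank;	// VLV
 *	dev->driver->enable_vblank = gen8_enable_vblank;	// BDW
 *	dev->driver->enable_vblank = ironlake_enable_vblank;	// ILK..HSW
 *	dev->driver->enable_vblank = i915_enable_vblank;	// gen2-4
 */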
2448
Keith Packard42f52ef2008-10-18 19:39:29 -07002449/* Called from drm generic code, passed 'crtc' which
2450 * we use as a pipe index
2451 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002452static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002453{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002454 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002455 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002456
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002457 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Damien Lespiau3d13ef22014-02-07 19:12:47 +00002458 if (INTEL_INFO(dev)->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002459 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00002460
Jesse Barnesf796cf82011-04-07 13:58:17 -07002461 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002462 PIPE_VBLANK_INTERRUPT_STATUS |
2463 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002464 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2465}
2466
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002467static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002468{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002469 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesf796cf82011-04-07 13:58:17 -07002470 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002471 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002472 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002473
2474 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002475 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002476 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2477}
2478
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002479static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2480{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002481 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002482 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002483
2484 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002485 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002486 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002487 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2488}
2489
Ben Widawskyabd58f02013-11-02 21:07:09 -07002490static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2491{
2492 struct drm_i915_private *dev_priv = dev->dev_private;
2493 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002494
2495 if (!i915_pipe_enabled(dev, pipe))
2496 return;
2497
2498 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002499 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2500 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2501 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002502 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2503}
2504
Chris Wilson893eead2010-10-27 14:44:35 +01002505static u32
2506ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08002507{
Chris Wilson893eead2010-10-27 14:44:35 +01002508 return list_entry(ring->request_list.prev,
2509 struct drm_i915_gem_request, list)->seqno;
2510}
2511
Chris Wilson9107e9d2013-06-10 11:20:20 +01002512static bool
2513ring_idle(struct intel_ring_buffer *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002514{
Chris Wilson9107e9d2013-06-10 11:20:20 +01002515 return (list_empty(&ring->request_list) ||
2516 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04002517}
2518
Daniel Vettera028c4b2014-03-15 00:08:56 +01002519static bool
2520ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2521{
2522 if (INTEL_INFO(dev)->gen >= 8) {
2523 /*
2524 * FIXME: gen8 semaphore support - currently we don't emit
2525 * semaphores on bdw anyway, but this needs to be addressed when
2526 * we merge that code.
2527 */
2528 return false;
2529 } else {
2530 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2531 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2532 MI_SEMAPHORE_REGISTER);
2533 }
2534}
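
/*
 * For reference (inferred from the scanner below, not from hardware
 * documentation): on pre-gen8 a semaphore wait starts with the command
 * dword matched here, immediately followed by the seqno being waited
 * on, roughly
 *
 *	dword 0: MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
 *		 MI_SEMAPHORE_REGISTER | <sync target bits>
 *	dword 1: seqno to wait for
 *
 * which is why semaphore_waits_for() reads the seqno at head + 4 once
 * it has found the dword matching IPEHR.
 */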
2535
Chris Wilson6274f212013-06-10 11:20:21 +01002536static struct intel_ring_buffer *
Daniel Vetter921d42e2014-03-18 10:26:04 +01002537semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
2538{
2539 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2540 struct intel_ring_buffer *signaller;
2541 int i;
2542
2543 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2544 /*
2545 * FIXME: gen8 semaphore support - currently we don't emit
2546 * semaphores on bdw anyway, but this needs to be addressed when
2547 * we merge that code.
2548 */
2549 return NULL;
2550 } else {
2551 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2552
2553 for_each_ring(signaller, dev_priv, i) {
2554			if (ring == signaller)
2555 continue;
2556
2557 if (sync_bits ==
2558 signaller->semaphore_register[ring->id])
2559 return signaller;
2560 }
2561 }
2562
2563 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
2564 ring->id, ipehr);
2565
2566 return NULL;
2567}
2568
Chris Wilson6274f212013-06-10 11:20:21 +01002569static struct intel_ring_buffer *
2570semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002571{
2572 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Daniel Vetter88fe4292014-03-15 00:08:55 +01002573 u32 cmd, ipehr, head;
2574 int i;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002575
2576 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
Daniel Vettera028c4b2014-03-15 00:08:56 +01002577 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
Chris Wilson6274f212013-06-10 11:20:21 +01002578 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002579
Daniel Vetter88fe4292014-03-15 00:08:55 +01002580 /*
2581 * HEAD is likely pointing to the dword after the actual command,
2582 * so scan backwards until we find the MBOX. But limit it to just 3
2583 * dwords. Note that we don't care about ACTHD here since that might
2584	 * point at a batch, and semaphores are always emitted into the
2585 * ringbuffer itself.
Chris Wilsona24a11e2013-03-14 17:52:05 +02002586 */
Daniel Vetter88fe4292014-03-15 00:08:55 +01002587 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2588
2589 for (i = 4; i; --i) {
2590 /*
2591 * Be paranoid and presume the hw has gone off into the wild -
2592 * our ring is smaller than what the hardware (and hence
2593 * HEAD_ADDR) allows. Also handles wrap-around.
2594 */
2595 head &= ring->size - 1;
2596
2597 /* This here seems to blow up */
2598 cmd = ioread32(ring->virtual_start + head);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002599 if (cmd == ipehr)
2600 break;
2601
Daniel Vetter88fe4292014-03-15 00:08:55 +01002602 head -= 4;
2603 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002604
Daniel Vetter88fe4292014-03-15 00:08:55 +01002605 if (!i)
2606 return NULL;
2607
2608 *seqno = ioread32(ring->virtual_start + head + 4) + 1;
Daniel Vetter921d42e2014-03-18 10:26:04 +01002609 return semaphore_wait_to_signaller_ring(ring, ipehr);
Chris Wilsona24a11e2013-03-14 17:52:05 +02002610}
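
/*
 * A minimal, self-contained sketch of the wrap-around arithmetic used
 * in the scan above (it assumes the ring size is a power of two, which
 * the masking relies on; the helper name is illustrative, not driver
 * API):
 *
 *	static u32 prev_dword(u32 head, u32 ring_size)
 *	{
 *		head -= 4;			// step back one dword
 *		return head & (ring_size - 1);	// also handles underflow
 *	}
 *
 * e.g. prev_dword(0, 4096) == 4092: stepping back from offset 0 wraps
 * to the last dword of the ring.
 */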
2611
Chris Wilson6274f212013-06-10 11:20:21 +01002612static int semaphore_passed(struct intel_ring_buffer *ring)
2613{
2614 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2615 struct intel_ring_buffer *signaller;
2616 u32 seqno, ctl;
2617
2618 ring->hangcheck.deadlock = true;
2619
2620 signaller = semaphore_waits_for(ring, &seqno);
2621 if (signaller == NULL || signaller->hangcheck.deadlock)
2622 return -1;
2623
2624 /* cursory check for an unkickable deadlock */
2625 ctl = I915_READ_CTL(signaller);
2626 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2627 return -1;
2628
2629 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2630}
2631
2632static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2633{
2634 struct intel_ring_buffer *ring;
2635 int i;
2636
2637 for_each_ring(ring, dev_priv, i)
2638 ring->hangcheck.deadlock = false;
2639}
2640
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002641static enum intel_ring_hangcheck_action
Chris Wilson50877442014-03-21 12:41:53 +00002642ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002643{
2644 struct drm_device *dev = ring->dev;
2645 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002646 u32 tmp;
2647
Chris Wilson6274f212013-06-10 11:20:21 +01002648 if (ring->hangcheck.acthd != acthd)
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002649 return HANGCHECK_ACTIVE;
Chris Wilson6274f212013-06-10 11:20:21 +01002650
Chris Wilson9107e9d2013-06-10 11:20:20 +01002651 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002652 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002653
2654 /* Is the chip hanging on a WAIT_FOR_EVENT?
2655 * If so we can simply poke the RB_WAIT bit
2656 * and break the hang. This should work on
2657 * all but the second generation chipsets.
2658 */
2659 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002660 if (tmp & RING_WAIT) {
Mika Kuoppala58174462014-02-25 17:11:26 +02002661 i915_handle_error(dev, false,
2662 "Kicking stuck wait on %s",
2663 ring->name);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002664 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002665 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002666 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002667
Chris Wilson6274f212013-06-10 11:20:21 +01002668 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2669 switch (semaphore_passed(ring)) {
2670 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002671 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002672 case 1:
Mika Kuoppala58174462014-02-25 17:11:26 +02002673 i915_handle_error(dev, false,
2674 "Kicking stuck semaphore on %s",
2675 ring->name);
Chris Wilson6274f212013-06-10 11:20:21 +01002676 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002677 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002678 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002679 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01002680 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002681 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002682
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002683 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002684}
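
/*
 * Summary of ring_stuck() above: ACTHD still moving => HANGCHECK_ACTIVE;
 * gen2 (nothing we can kick) => HANGCHECK_HUNG; stuck on WAIT_FOR_EVENT
 * or on a kickable semaphore => poke the ring and return HANGCHECK_KICK;
 * a legitimate wait on another ring's semaphore => HANGCHECK_WAIT;
 * anything else => HANGCHECK_HUNG. The hangcheck timer below turns these
 * into score increments.
 */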
2685
Ben Gamarif65d9422009-09-14 17:48:44 -04002686/**
2687 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002688 * batchbuffers in a long time. We keep track of per-ring seqno progress, and
 2689 * if there is no progress the hangcheck score for that ring is increased.
 2690 * Further, acthd is inspected to see if the ring is stuck. If the ring is
 2691 * stuck we kick it. If we see no progress on three subsequent calls
 2692 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04002693 */
Damien Lespiaua658b5d2013-08-08 22:28:56 +01002694static void i915_hangcheck_elapsed(unsigned long data)
Ben Gamarif65d9422009-09-14 17:48:44 -04002695{
2696 struct drm_device *dev = (struct drm_device *)data;
Jani Nikula2d1013d2014-03-31 14:27:17 +03002697 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002698 struct intel_ring_buffer *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01002699 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002700 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002701 bool stuck[I915_NUM_RINGS] = { 0 };
2702#define BUSY 1
2703#define KICK 5
2704#define HUNG 20
Chris Wilson893eead2010-10-27 14:44:35 +01002705
Jani Nikulad330a952014-01-21 11:24:25 +02002706 if (!i915.enable_hangcheck)
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002707 return;
2708
Chris Wilsonb4519512012-05-11 14:29:30 +01002709 for_each_ring(ring, dev_priv, i) {
Chris Wilson50877442014-03-21 12:41:53 +00002710 u64 acthd;
2711 u32 seqno;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002712 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002713
Chris Wilson6274f212013-06-10 11:20:21 +01002714 semaphore_clear_deadlocks(dev_priv);
2715
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002716 seqno = ring->get_seqno(ring, false);
2717 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002718
Chris Wilson9107e9d2013-06-10 11:20:20 +01002719 if (ring->hangcheck.seqno == seqno) {
2720 if (ring_idle(ring, seqno)) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002721 ring->hangcheck.action = HANGCHECK_IDLE;
2722
Chris Wilson9107e9d2013-06-10 11:20:20 +01002723 if (waitqueue_active(&ring->irq_queue)) {
2724 /* Issue a wake-up to catch stuck h/w. */
Chris Wilson094f9a52013-09-25 17:34:55 +01002725 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
Daniel Vetterf4adcd22013-10-28 09:24:13 +01002726 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2727 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2728 ring->name);
2729 else
2730 DRM_INFO("Fake missed irq on %s\n",
2731 ring->name);
Chris Wilson094f9a52013-09-25 17:34:55 +01002732 wake_up_all(&ring->irq_queue);
2733 }
2734 /* Safeguard against driver failure */
2735 ring->hangcheck.score += BUSY;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002736 } else
2737 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002738 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01002739 /* We always increment the hangcheck score
2740 * if the ring is busy and still processing
2741 * the same request, so that no single request
2742 * can run indefinitely (such as a chain of
2743 * batches). The only time we do not increment
2744				 * the hangcheck score on this ring is when this
2745 * ring is in a legitimate wait for another
2746 * ring. In that case the waiting ring is a
2747 * victim and we want to be sure we catch the
2748 * right culprit. Then every time we do kick
2749 * the ring, add a small increment to the
2750 * score so that we can catch a batch that is
2751 * being repeatedly kicked and so responsible
2752 * for stalling the machine.
2753 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002754 ring->hangcheck.action = ring_stuck(ring,
2755 acthd);
2756
2757 switch (ring->hangcheck.action) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002758 case HANGCHECK_IDLE:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002759 case HANGCHECK_WAIT:
Chris Wilson6274f212013-06-10 11:20:21 +01002760 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002761 case HANGCHECK_ACTIVE:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002762 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01002763 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002764 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002765 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002766 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002767 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002768 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002769 stuck[i] = true;
2770 break;
2771 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002772 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002773 } else {
Mika Kuoppalada661462013-09-06 16:03:28 +03002774 ring->hangcheck.action = HANGCHECK_ACTIVE;
2775
Chris Wilson9107e9d2013-06-10 11:20:20 +01002776 /* Gradually reduce the count so that we catch DoS
2777 * attempts across multiple batches.
2778 */
2779 if (ring->hangcheck.score > 0)
2780 ring->hangcheck.score--;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002781 }
2782
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002783 ring->hangcheck.seqno = seqno;
2784 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002785 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002786 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002787
Mika Kuoppala92cab732013-05-24 17:16:07 +03002788 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002789 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
Daniel Vetterb8d88d12013-08-28 10:57:59 +02002790 DRM_INFO("%s on %s\n",
2791 stuck[i] ? "stuck" : "no progress",
2792 ring->name);
Chris Wilsona43adf02013-06-10 11:20:22 +01002793 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002794 }
2795 }
2796
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002797 if (rings_hung)
Mika Kuoppala58174462014-02-25 17:11:26 +02002798 return i915_handle_error(dev, true, "Ring hung");
Ben Gamarif65d9422009-09-14 17:48:44 -04002799
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002800 if (busy_count)
2801		/* Reset timer in case the chip hangs without another request
2802 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002803 i915_queue_hangcheck(dev);
2804}
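
/*
 * Worked example of the scoring above (BUSY=1, KICK=5, HUNG=20; the
 * reset threshold HANGCHECK_SCORE_RING_HUNG is defined elsewhere in the
 * driver headers): a ring evaluated as HANGCHECK_HUNG on two
 * consecutive timer runs accumulates 20 + 20 = 40, crosses the
 * threshold and triggers i915_handle_error(dev, true, "Ring hung"). A
 * ring that only ever needs kicking gains 5 per run and gets there more
 * slowly, while a ring that resumes making progress decays by 1 per
 * run, so transient stalls do not end in a GPU reset.
 */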
2805
2806void i915_queue_hangcheck(struct drm_device *dev)
2807{
2808 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulad330a952014-01-21 11:24:25 +02002809 if (!i915.enable_hangcheck)
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002810 return;
2811
2812 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2813 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002814}
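
/*
 * DRM_I915_HANGCHECK_JIFFIES is derived from the hangcheck period in
 * i915_drv.h (on the order of 1.5 seconds in this era), so the "three
 * subsequent calls" mentioned above correspond to a few seconds without
 * forward progress before a ring is declared hung.
 */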
2815
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03002816static void ibx_irq_reset(struct drm_device *dev)
Paulo Zanoni91738a92013-06-05 14:21:51 -03002817{
2818 struct drm_i915_private *dev_priv = dev->dev_private;
2819
2820 if (HAS_PCH_NOP(dev))
2821 return;
2822
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002823 GEN5_IRQ_RESET(SDE);
Paulo Zanoni105b1222014-04-01 15:37:17 -03002824
2825 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2826 I915_WRITE(SERR_INT, 0xffffffff);
Paulo Zanoni622364b2014-04-01 15:37:22 -03002827}
Paulo Zanoni105b1222014-04-01 15:37:17 -03002828
Paulo Zanoni622364b2014-04-01 15:37:22 -03002829/*
2830 * SDEIER is also touched by the interrupt handler to work around missed PCH
2831 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2832 * instead we unconditionally enable all PCH interrupt sources here, but then
2833 * only unmask them as needed with SDEIMR.
2834 *
2835 * This function needs to be called before interrupts are enabled.
2836 */
2837static void ibx_irq_pre_postinstall(struct drm_device *dev)
2838{
2839 struct drm_i915_private *dev_priv = dev->dev_private;
2840
2841 if (HAS_PCH_NOP(dev))
2842 return;
2843
2844 WARN_ON(I915_READ(SDEIER) != 0);
Paulo Zanoni91738a92013-06-05 14:21:51 -03002845 I915_WRITE(SDEIER, 0xffffffff);
2846 POSTING_READ(SDEIER);
2847}
2848
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03002849static void gen5_gt_irq_reset(struct drm_device *dev)
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002850{
2851 struct drm_i915_private *dev_priv = dev->dev_private;
2852
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002853 GEN5_IRQ_RESET(GT);
Paulo Zanonia9d356a2014-04-01 15:37:09 -03002854 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002855 GEN5_IRQ_RESET(GEN6_PM);
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002856}
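
/*
 * GEN5_IRQ_RESET()/GEN8_IRQ_RESET_NDX() are macros defined earlier in
 * this file. Paraphrased (not a verbatim copy), resetting interrupt
 * block FOO amounts to:
 *
 *	I915_WRITE(FOOIMR, 0xffffffff);	// mask every source
 *	POSTING_READ(FOOIMR);
 *	I915_WRITE(FOOIER, 0);		// enable nothing
 *	I915_WRITE(FOOIIR, 0xffffffff);	// ack pending bits, twice, to
 *	POSTING_READ(FOOIIR);		// catch sources that re-assert
 *	I915_WRITE(FOOIIR, 0xffffffff);
 *	POSTING_READ(FOOIIR);
 */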
2857
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858/* drm_dma.h hooks */
Paulo Zanonibe30b292014-04-01 15:37:25 -03002860static void ironlake_irq_reset(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002861{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002862 struct drm_i915_private *dev_priv = dev->dev_private;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002863
Paulo Zanoni0c841212014-04-01 15:37:27 -03002864 I915_WRITE(HWSTAM, 0xffffffff);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002865
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002866 GEN5_IRQ_RESET(DE);
Paulo Zanonic6d954c2014-04-01 15:37:18 -03002867 if (IS_GEN7(dev))
2868 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002869
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03002870 gen5_gt_irq_reset(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002871
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03002872 ibx_irq_reset(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002873}
2874
Paulo Zanonibe30b292014-04-01 15:37:25 -03002875static void ironlake_irq_preinstall(struct drm_device *dev)
2876{
Paulo Zanonibe30b292014-04-01 15:37:25 -03002877 ironlake_irq_reset(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002878}
2879
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002880static void valleyview_irq_preinstall(struct drm_device *dev)
2881{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002882 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002883 int pipe;
2884
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002885 /* VLV magic */
2886 I915_WRITE(VLV_IMR, 0);
2887 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2888 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2889 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2890
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002891 /* and GT */
2892 I915_WRITE(GTIIR, I915_READ(GTIIR));
2893 I915_WRITE(GTIIR, I915_READ(GTIIR));
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002894
Paulo Zanoni7c4d6642014-04-01 15:37:19 -03002895 gen5_gt_irq_reset(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002896
2897 I915_WRITE(DPINVGTT, 0xff);
2898
2899 I915_WRITE(PORT_HOTPLUG_EN, 0);
2900 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2901 for_each_pipe(pipe)
2902 I915_WRITE(PIPESTAT(pipe), 0xffff);
2903 I915_WRITE(VLV_IIR, 0xffffffff);
2904 I915_WRITE(VLV_IMR, 0xffffffff);
2905 I915_WRITE(VLV_IER, 0x0);
2906 POSTING_READ(VLV_IER);
2907}
2908
Paulo Zanoni823f6b32014-04-01 15:37:26 -03002909static void gen8_irq_reset(struct drm_device *dev)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002910{
2911 struct drm_i915_private *dev_priv = dev->dev_private;
2912 int pipe;
2913
Ben Widawskyabd58f02013-11-02 21:07:09 -07002914 I915_WRITE(GEN8_MASTER_IRQ, 0);
2915 POSTING_READ(GEN8_MASTER_IRQ);
2916
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002917 GEN8_IRQ_RESET_NDX(GT, 0);
2918 GEN8_IRQ_RESET_NDX(GT, 1);
2919 GEN8_IRQ_RESET_NDX(GT, 2);
2920 GEN8_IRQ_RESET_NDX(GT, 3);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002921
Paulo Zanoni823f6b32014-04-01 15:37:26 -03002922 for_each_pipe(pipe)
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002923 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002924
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002925 GEN5_IRQ_RESET(GEN8_DE_PORT_);
2926 GEN5_IRQ_RESET(GEN8_DE_MISC_);
2927 GEN5_IRQ_RESET(GEN8_PCU_);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002928
Paulo Zanoni1c69eb42014-04-01 15:37:23 -03002929 ibx_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002930}
Ben Widawskyabd58f02013-11-02 21:07:09 -07002931
Paulo Zanoni823f6b32014-04-01 15:37:26 -03002932static void gen8_irq_preinstall(struct drm_device *dev)
2933{
2934 gen8_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002935}
2936
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002937static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002938{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002939 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002940 struct drm_mode_config *mode_config = &dev->mode_config;
2941 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02002942 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07002943
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002944 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002945 hotplug_irqs = SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002946 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002947 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002948 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002949 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002950 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002951 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002952 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002953 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002954 }
2955
Daniel Vetterfee884e2013-07-04 23:35:21 +02002956 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002957
2958 /*
2959 * Enable digital hotplug on the PCH, and configure the DP short pulse
2960 * duration to 2ms (which is the minimum in the Display Port spec)
2961 *
2962 * This register is the same on all known PCH chips.
2963 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002964 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2965 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2966 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2967 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2968 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2969 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2970}
2971
Paulo Zanonid46da432013-02-08 17:35:15 -02002972static void ibx_irq_postinstall(struct drm_device *dev)
2973{
Jani Nikula2d1013d2014-03-31 14:27:17 +03002974 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002975 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02002976
Daniel Vetter692a04c2013-05-29 21:43:05 +02002977 if (HAS_PCH_NOP(dev))
2978 return;
2979
Paulo Zanoni105b1222014-04-01 15:37:17 -03002980 if (HAS_PCH_IBX(dev))
Daniel Vetter5c673b62014-03-07 20:34:46 +01002981 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
Paulo Zanoni105b1222014-04-01 15:37:17 -03002982 else
Daniel Vetter5c673b62014-03-07 20:34:46 +01002983 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
Paulo Zanoni86642812013-04-12 17:57:57 -03002984
Paulo Zanoni337ba012014-04-01 15:37:16 -03002985 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
Paulo Zanonid46da432013-02-08 17:35:15 -02002986 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02002987}
2988
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002989static void gen5_gt_irq_postinstall(struct drm_device *dev)
2990{
2991 struct drm_i915_private *dev_priv = dev->dev_private;
2992 u32 pm_irqs, gt_irqs;
2993
2994 pm_irqs = gt_irqs = 0;
2995
2996 dev_priv->gt_irq_mask = ~0;
Ben Widawsky040d2ba2013-09-19 11:01:40 -07002997 if (HAS_L3_DPF(dev)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002998 /* L3 parity interrupt is always unmasked. */
Ben Widawsky35a85ac2013-09-19 11:13:41 -07002999 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3000 gt_irqs |= GT_PARITY_ERROR(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003001 }
3002
3003 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3004 if (IS_GEN5(dev)) {
3005 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3006 ILK_BSD_USER_INTERRUPT;
3007 } else {
3008 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3009 }
3010
Paulo Zanoni35079892014-04-01 15:37:15 -03003011 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003012
3013 if (INTEL_INFO(dev)->gen >= 6) {
Deepak Sa6706b42014-03-15 20:23:22 +05303014 pm_irqs |= dev_priv->pm_rps_events;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003015
3016 if (HAS_VEBOX(dev))
3017 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3018
Paulo Zanoni605cd252013-08-06 18:57:15 -03003019 dev_priv->pm_irq_mask = 0xffffffff;
Paulo Zanoni35079892014-04-01 15:37:15 -03003020 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003021 }
3022}
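
/*
 * GEN5_IRQ_INIT()/GEN8_IRQ_INIT_NDX(), also defined earlier in the
 * file, mirror the reset macros. Paraphrased, initializing block FOO
 * with (imr_val, ier_val) is roughly:
 *
 *	GEN5_ASSERT_IIR_IS_ZERO(FOOIIR);	// reset must have run first
 *	I915_WRITE(FOOIER, ier_val);
 *	I915_WRITE(FOOIMR, imr_val);
 *	POSTING_READ(FOOIMR);
 */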
3023
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003024static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003025{
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003026 unsigned long irqflags;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003027 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003028 u32 display_mask, extra_mask;
3029
3030 if (INTEL_INFO(dev)->gen >= 7) {
3031 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3032 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3033 DE_PLANEB_FLIP_DONE_IVB |
Daniel Vetter5c673b62014-03-07 20:34:46 +01003034 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003035 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
Daniel Vetter5c673b62014-03-07 20:34:46 +01003036 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003037 } else {
3038 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3039 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003040 DE_AUX_CHANNEL_A |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02003041 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3042 DE_POISON);
Daniel Vetter5c673b62014-03-07 20:34:46 +01003043 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3044 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003045 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003046
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003047 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003048
Paulo Zanoni0c841212014-04-01 15:37:27 -03003049 I915_WRITE(HWSTAM, 0xeffe);
3050
Paulo Zanoni622364b2014-04-01 15:37:22 -03003051 ibx_irq_pre_postinstall(dev);
3052
Paulo Zanoni35079892014-04-01 15:37:15 -03003053 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003054
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003055 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003056
Paulo Zanonid46da432013-02-08 17:35:15 -02003057 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07003058
Jesse Barnesf97108d2010-01-29 11:27:07 -08003059 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02003060 /* Enable PCU event interrupts
3061 *
3062 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003063 * setup is guaranteed to run in single-threaded context. But we
3064 * need it to make the assert_spin_locked happy. */
3065 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003066 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003067 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003068 }
3069
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003070 return 0;
3071}
3072
Imre Deakf8b79e52014-03-04 19:23:07 +02003073static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3074{
3075 u32 pipestat_mask;
3076 u32 iir_mask;
3077
3078 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3079 PIPE_FIFO_UNDERRUN_STATUS;
3080
3081 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3082 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3083 POSTING_READ(PIPESTAT(PIPE_A));
3084
3085 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3086 PIPE_CRC_DONE_INTERRUPT_STATUS;
3087
3088 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3089 PIPE_GMBUS_INTERRUPT_STATUS);
3090 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3091
3092 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3093 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3094 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3095 dev_priv->irq_mask &= ~iir_mask;
3096
3097 I915_WRITE(VLV_IIR, iir_mask);
3098 I915_WRITE(VLV_IIR, iir_mask);
3099 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3100 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3101 POSTING_READ(VLV_IER);
3102}
3103
3104static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3105{
3106 u32 pipestat_mask;
3107 u32 iir_mask;
3108
3109 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3110 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
Imre Deak6c7fba02014-03-10 19:44:48 +02003111 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
Imre Deakf8b79e52014-03-04 19:23:07 +02003112
3113 dev_priv->irq_mask |= iir_mask;
3114 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3115 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3116 I915_WRITE(VLV_IIR, iir_mask);
3117 I915_WRITE(VLV_IIR, iir_mask);
3118 POSTING_READ(VLV_IIR);
3119
3120 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3121 PIPE_CRC_DONE_INTERRUPT_STATUS;
3122
3123 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3124 PIPE_GMBUS_INTERRUPT_STATUS);
3125 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3126
3127 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3128 PIPE_FIFO_UNDERRUN_STATUS;
3129 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3130 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3131 POSTING_READ(PIPESTAT(PIPE_A));
3132}
3133
3134void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3135{
3136 assert_spin_locked(&dev_priv->irq_lock);
3137
3138 if (dev_priv->display_irqs_enabled)
3139 return;
3140
3141 dev_priv->display_irqs_enabled = true;
3142
3143 if (dev_priv->dev->irq_enabled)
3144 valleyview_display_irqs_install(dev_priv);
3145}
3146
3147void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3148{
3149 assert_spin_locked(&dev_priv->irq_lock);
3150
3151 if (!dev_priv->display_irqs_enabled)
3152 return;
3153
3154 dev_priv->display_irqs_enabled = false;
3155
3156 if (dev_priv->dev->irq_enabled)
3157 valleyview_display_irqs_uninstall(dev_priv);
3158}
3159
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003160static int valleyview_irq_postinstall(struct drm_device *dev)
3161{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003162 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb79480b2013-06-27 17:52:10 +02003163 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003164
Imre Deakf8b79e52014-03-04 19:23:07 +02003165 dev_priv->irq_mask = ~0;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003166
Daniel Vetter20afbda2012-12-11 14:05:07 +01003167 I915_WRITE(PORT_HOTPLUG_EN, 0);
3168 POSTING_READ(PORT_HOTPLUG_EN);
3169
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003170 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
Imre Deakf8b79e52014-03-04 19:23:07 +02003171 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003172 I915_WRITE(VLV_IIR, 0xffffffff);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003173 POSTING_READ(VLV_IER);
3174
Daniel Vetterb79480b2013-06-27 17:52:10 +02003175 /* Interrupt setup is already guaranteed to be single-threaded, this is
3176 * just to make the assert_spin_locked check happy. */
3177 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Imre Deakf8b79e52014-03-04 19:23:07 +02003178 if (dev_priv->display_irqs_enabled)
3179 valleyview_display_irqs_install(dev_priv);
Daniel Vetterb79480b2013-06-27 17:52:10 +02003180 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07003181
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003182 I915_WRITE(VLV_IIR, 0xffffffff);
3183 I915_WRITE(VLV_IIR, 0xffffffff);
3184
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003185 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003186
3187 /* ack & enable invalid PTE error interrupts */
3188#if 0 /* FIXME: add support to irq handler for checking these bits */
3189 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3190 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3191#endif
3192
3193 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003194
3195 return 0;
3196}
3197
Ben Widawskyabd58f02013-11-02 21:07:09 -07003198static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3199{
3200 int i;
3201
3202 /* These are interrupts we'll toggle with the ring mask register */
3203 uint32_t gt_interrupts[] = {
3204 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3205 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3206 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3207 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3208 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3209 0,
3210 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3211 };
3212
Paulo Zanoni337ba012014-04-01 15:37:16 -03003213 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
Paulo Zanoni35079892014-04-01 15:37:15 -03003214 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003215}
3216
3217static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3218{
3219 struct drm_device *dev = dev_priv->dev;
Damien Lespiaud0e1f1c2014-04-08 01:22:44 +01003220 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003221 GEN8_PIPE_CDCLK_CRC_DONE |
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003222 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
Daniel Vetter5c673b62014-03-07 20:34:46 +01003223 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3224 GEN8_PIPE_FIFO_UNDERRUN;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003225 int pipe;
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003226 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3227 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3228 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003229
Paulo Zanoni337ba012014-04-01 15:37:16 -03003230 for_each_pipe(pipe)
Paulo Zanoni35079892014-04-01 15:37:15 -03003231 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
3232 de_pipe_enables);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003233
Paulo Zanoni35079892014-04-01 15:37:15 -03003234 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003235}
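
/*
 * Note that the dev_priv->de_irq_mask[] values set up here are the same
 * cached copies that gen8_enable_vblank()/gen8_disable_vblank() above
 * edit at runtime: they clear or set GEN8_PIPE_VBLANK in the cached
 * mask and write it back to GEN8_DE_PIPE_IMR(pipe), so the postinstall
 * value and the runtime value stay in sync.
 */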
3236
3237static int gen8_irq_postinstall(struct drm_device *dev)
3238{
3239 struct drm_i915_private *dev_priv = dev->dev_private;
3240
Paulo Zanoni622364b2014-04-01 15:37:22 -03003241 ibx_irq_pre_postinstall(dev);
3242
Ben Widawskyabd58f02013-11-02 21:07:09 -07003243 gen8_gt_irq_postinstall(dev_priv);
3244 gen8_de_irq_postinstall(dev_priv);
3245
3246 ibx_irq_postinstall(dev);
3247
3248 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3249 POSTING_READ(GEN8_MASTER_IRQ);
3250
3251 return 0;
3252}
3253
3254static void gen8_irq_uninstall(struct drm_device *dev)
3255{
3256 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003257
3258 if (!dev_priv)
3259 return;
3260
Paulo Zanonid4eb6b12014-04-01 15:37:24 -03003261 intel_hpd_irq_uninstall(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003262
Paulo Zanoni823f6b32014-04-01 15:37:26 -03003263 gen8_irq_reset(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003264}
3265
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003266static void valleyview_irq_uninstall(struct drm_device *dev)
3267{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003268 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakf8b79e52014-03-04 19:23:07 +02003269 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003270 int pipe;
3271
3272 if (!dev_priv)
3273 return;
3274
Imre Deak843d0e72014-04-14 20:24:23 +03003275 I915_WRITE(VLV_MASTER_IER, 0);
3276
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003277 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003278
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003279 for_each_pipe(pipe)
3280 I915_WRITE(PIPESTAT(pipe), 0xffff);
3281
3282 I915_WRITE(HWSTAM, 0xffffffff);
3283 I915_WRITE(PORT_HOTPLUG_EN, 0);
3284 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Imre Deakf8b79e52014-03-04 19:23:07 +02003285
3286 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3287 if (dev_priv->display_irqs_enabled)
3288 valleyview_display_irqs_uninstall(dev_priv);
3289 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3290
3291 dev_priv->irq_mask = 0;
3292
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003293 I915_WRITE(VLV_IIR, 0xffffffff);
3294 I915_WRITE(VLV_IMR, 0xffffffff);
3295 I915_WRITE(VLV_IER, 0x0);
3296 POSTING_READ(VLV_IER);
3297}
3298
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003299static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003300{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003301 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07003302
3303 if (!dev_priv)
3304 return;
3305
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003306 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003307
Paulo Zanonibe30b292014-04-01 15:37:25 -03003308 ironlake_irq_reset(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003309}
3310
Chris Wilsonc2798b12012-04-22 21:13:57 +01003311static void i8xx_irq_preinstall(struct drm_device * dev)
3312{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003313 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003314 int pipe;
3315
Chris Wilsonc2798b12012-04-22 21:13:57 +01003316 for_each_pipe(pipe)
3317 I915_WRITE(PIPESTAT(pipe), 0);
3318 I915_WRITE16(IMR, 0xffff);
3319 I915_WRITE16(IER, 0x0);
3320 POSTING_READ16(IER);
3321}
3322
3323static int i8xx_irq_postinstall(struct drm_device *dev)
3324{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003325 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter379ef822013-10-16 22:55:56 +02003326 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003327
Chris Wilsonc2798b12012-04-22 21:13:57 +01003328 I915_WRITE16(EMR,
3329 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3330
3331 /* Unmask the interrupts that we always want on. */
3332 dev_priv->irq_mask =
3333 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3334 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3335 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3336 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3337 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3338 I915_WRITE16(IMR, dev_priv->irq_mask);
3339
3340 I915_WRITE16(IER,
3341 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3342 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3343 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3344 I915_USER_INTERRUPT);
3345 POSTING_READ16(IER);
3346
Daniel Vetter379ef822013-10-16 22:55:56 +02003347 /* Interrupt setup is already guaranteed to be single-threaded, this is
3348 * just to make the assert_spin_locked check happy. */
3349 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Imre Deak755e9012014-02-10 18:42:47 +02003350 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3351 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetter379ef822013-10-16 22:55:56 +02003352 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3353
Chris Wilsonc2798b12012-04-22 21:13:57 +01003354 return 0;
3355}
3356
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003357/*
3358 * Returns true when a page flip has completed.
3359 */
3360static bool i8xx_handle_vblank(struct drm_device *dev,
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003361 int plane, int pipe, u32 iir)
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003362{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003363 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003364 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003365
3366 if (!drm_handle_vblank(dev, pipe))
3367 return false;
3368
3369 if ((iir & flip_pending) == 0)
3370 return false;
3371
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003372 intel_prepare_page_flip(dev, plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003373
3374 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3375 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3376 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3377 * the flip is completed (no longer pending). Since this doesn't raise
3378 * an interrupt per se, we watch for the change at vblank.
3379 */
3380 if (I915_READ16(ISR) & flip_pending)
3381 return false;
3382
3383 intel_finish_page_flip(dev, pipe);
3384
3385 return true;
3386}
3387
Daniel Vetterff1f5252012-10-02 15:10:55 +02003388static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003389{
3390 struct drm_device *dev = (struct drm_device *) arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003391 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003392 u16 iir, new_iir;
3393 u32 pipe_stats[2];
3394 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003395 int pipe;
3396 u16 flip_mask =
3397 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3398 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3399
Chris Wilsonc2798b12012-04-22 21:13:57 +01003400 iir = I915_READ16(IIR);
3401 if (iir == 0)
3402 return IRQ_NONE;
3403
3404 while (iir & ~flip_mask) {
3405 /* Can't rely on pipestat interrupt bit in iir as it might
3406 * have been cleared after the pipestat interrupt was received.
3407 * It doesn't set the bit in iir again, but it still produces
3408 * interrupts (for non-MSI).
3409 */
3410 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3411 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02003412 i915_handle_error(dev, false,
3413 "Command parser error, iir 0x%08x",
3414 iir);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003415
3416 for_each_pipe(pipe) {
3417 int reg = PIPESTAT(pipe);
3418 pipe_stats[pipe] = I915_READ(reg);
3419
3420 /*
3421 * Clear the PIPE*STAT regs before the IIR
3422 */
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003423 if (pipe_stats[pipe] & 0x8000ffff)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003424 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003425 }
3426 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3427
3428 I915_WRITE16(IIR, iir & ~flip_mask);
3429 new_iir = I915_READ16(IIR); /* Flush posted writes */
3430
Daniel Vetterd05c6172012-04-26 23:28:09 +02003431 i915_update_dri1_breadcrumb(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003432
3433 if (iir & I915_USER_INTERRUPT)
3434 notify_ring(dev, &dev_priv->ring[RCS]);
3435
Daniel Vetter4356d582013-10-16 22:55:55 +02003436 for_each_pipe(pipe) {
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003437 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003438 if (HAS_FBC(dev))
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003439 plane = !plane;
3440
Daniel Vetter4356d582013-10-16 22:55:55 +02003441 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003442 i8xx_handle_vblank(dev, plane, pipe, iir))
3443 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003444
Daniel Vetter4356d582013-10-16 22:55:55 +02003445 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003446 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003447
3448 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3449 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02003450 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
Daniel Vetter4356d582013-10-16 22:55:55 +02003451 }
Chris Wilsonc2798b12012-04-22 21:13:57 +01003452
3453 iir = new_iir;
3454 }
3455
3456 return IRQ_HANDLED;
3457}
3458
3459static void i8xx_irq_uninstall(struct drm_device * dev)
3460{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003461 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003462 int pipe;
3463
Chris Wilsonc2798b12012-04-22 21:13:57 +01003464 for_each_pipe(pipe) {
3465 /* Clear enable bits; then clear status bits */
3466 I915_WRITE(PIPESTAT(pipe), 0);
3467 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3468 }
3469 I915_WRITE16(IMR, 0xffff);
3470 I915_WRITE16(IER, 0x0);
3471 I915_WRITE16(IIR, I915_READ16(IIR));
3472}
3473
Chris Wilsona266c7d2012-04-24 22:59:44 +01003474static void i915_irq_preinstall(struct drm_device * dev)
3475{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003476 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003477 int pipe;
3478
Chris Wilsona266c7d2012-04-24 22:59:44 +01003479 if (I915_HAS_HOTPLUG(dev)) {
3480 I915_WRITE(PORT_HOTPLUG_EN, 0);
3481 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3482 }
3483
Chris Wilson00d98eb2012-04-24 22:59:48 +01003484 I915_WRITE16(HWSTAM, 0xeffe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003485 for_each_pipe(pipe)
3486 I915_WRITE(PIPESTAT(pipe), 0);
3487 I915_WRITE(IMR, 0xffffffff);
3488 I915_WRITE(IER, 0x0);
3489 POSTING_READ(IER);
3490}
3491
3492static int i915_irq_postinstall(struct drm_device *dev)
3493{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003494 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01003495 u32 enable_mask;
Daniel Vetter379ef822013-10-16 22:55:56 +02003496 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003497
Chris Wilson38bde182012-04-24 22:59:50 +01003498 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3499
3500 /* Unmask the interrupts that we always want on. */
3501 dev_priv->irq_mask =
3502 ~(I915_ASLE_INTERRUPT |
3503 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3504 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3505 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3506 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3507 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3508
3509 enable_mask =
3510 I915_ASLE_INTERRUPT |
3511 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3512 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3513 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3514 I915_USER_INTERRUPT;
3515
Chris Wilsona266c7d2012-04-24 22:59:44 +01003516 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01003517 I915_WRITE(PORT_HOTPLUG_EN, 0);
3518 POSTING_READ(PORT_HOTPLUG_EN);
3519
Chris Wilsona266c7d2012-04-24 22:59:44 +01003520 /* Enable in IER... */
3521 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3522 /* and unmask in IMR */
3523 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3524 }
3525
Chris Wilsona266c7d2012-04-24 22:59:44 +01003526 I915_WRITE(IMR, dev_priv->irq_mask);
3527 I915_WRITE(IER, enable_mask);
3528 POSTING_READ(IER);
3529
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003530 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003531
Daniel Vetter379ef822013-10-16 22:55:56 +02003532 /* Interrupt setup is already guaranteed to be single-threaded, this is
3533 * just to make the assert_spin_locked check happy. */
3534 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Imre Deak755e9012014-02-10 18:42:47 +02003535 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3536 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetter379ef822013-10-16 22:55:56 +02003537 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3538
Daniel Vetter20afbda2012-12-11 14:05:07 +01003539 return 0;
3540}
3541
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003542/*
3543 * Returns true when a page flip has completed.
3544 */
3545static bool i915_handle_vblank(struct drm_device *dev,
3546 int plane, int pipe, u32 iir)
3547{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003548 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003549 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3550
3551 if (!drm_handle_vblank(dev, pipe))
3552 return false;
3553
3554 if ((iir & flip_pending) == 0)
3555 return false;
3556
3557 intel_prepare_page_flip(dev, plane);
3558
3559 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3560 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3561 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3562 * the flip is completed (no longer pending). Since this doesn't raise
3563 * an interrupt per se, we watch for the change at vblank.
3564 */
3565 if (I915_READ(ISR) & flip_pending)
3566 return false;
3567
3568 intel_finish_page_flip(dev, pipe);
3569
3570 return true;
3571}
3572
Daniel Vetterff1f5252012-10-02 15:10:55 +02003573static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003574{
3575 struct drm_device *dev = (struct drm_device *) arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003576 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01003577 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003578 unsigned long irqflags;
Chris Wilson38bde182012-04-24 22:59:50 +01003579 u32 flip_mask =
3580 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3581 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01003582 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003583
Chris Wilsona266c7d2012-04-24 22:59:44 +01003584 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01003585 do {
3586 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01003587 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003588
3589 /* Can't rely on pipestat interrupt bit in iir as it might
3590 * have been cleared after the pipestat interrupt was received.
3591 * It doesn't set the bit in iir again, but it still produces
3592 * interrupts (for non-MSI).
3593 */
3594 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3595 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02003596 i915_handle_error(dev, false,
3597 "Command parser error, iir 0x%08x",
3598 iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003599
3600 for_each_pipe(pipe) {
3601 int reg = PIPESTAT(pipe);
3602 pipe_stats[pipe] = I915_READ(reg);
3603
Chris Wilson38bde182012-04-24 22:59:50 +01003604 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003605 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003606 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01003607 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003608 }
3609 }
3610 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3611
3612 if (!irq_received)
3613 break;
3614
Chris Wilsona266c7d2012-04-24 22:59:44 +01003615 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä16c6c562014-04-01 10:54:36 +03003616 if (I915_HAS_HOTPLUG(dev) &&
3617 iir & I915_DISPLAY_PORT_INTERRUPT)
3618 i9xx_hpd_irq_handler(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003619
Chris Wilson38bde182012-04-24 22:59:50 +01003620 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003621 new_iir = I915_READ(IIR); /* Flush posted writes */
3622
Chris Wilsona266c7d2012-04-24 22:59:44 +01003623 if (iir & I915_USER_INTERRUPT)
3624 notify_ring(dev, &dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003625
Chris Wilsona266c7d2012-04-24 22:59:44 +01003626 for_each_pipe(pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01003627 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003628 if (HAS_FBC(dev))
Chris Wilson38bde182012-04-24 22:59:50 +01003629 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02003630
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003631 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3632 i915_handle_vblank(dev, plane, pipe, iir))
3633 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003634
3635 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3636 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003637
3638 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003639 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003640
3641 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3642 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02003643 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003644 }
3645
Chris Wilsona266c7d2012-04-24 22:59:44 +01003646 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3647 intel_opregion_asle_intr(dev);
3648
3649 /* With MSI, interrupts are only generated when iir
3650 * transitions from zero to nonzero. If another bit got
3651 * set while we were handling the existing iir bits, then
3652 * we would never get another interrupt.
3653 *
3654 * This is fine on non-MSI as well, as if we hit this path
3655 * we avoid exiting the interrupt handler only to generate
3656 * another one.
3657 *
3658 * Note that for MSI this could cause a stray interrupt report
3659 * if an interrupt landed in the time between writing IIR and
3660 * the posting read. This should be rare enough to never
3661 * trigger the 99% of 100,000 interrupts test for disabling
3662 * stray interrupts.
3663 */
Chris Wilson38bde182012-04-24 22:59:50 +01003664 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003665 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003666 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003667
Daniel Vetterd05c6172012-04-26 23:28:09 +02003668 i915_update_dri1_breadcrumb(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01003669
Chris Wilsona266c7d2012-04-24 22:59:44 +01003670 return ret;
3671}
3672
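/*
 * Tear-down for the i915 interrupt path: quiesce hotplug, mask HWSTAM
 * and IMR, clear the per-pipe enable bits before acking their status
 * bits, and finally ack anything still pending in IIR.
 */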
3673static void i915_irq_uninstall(struct drm_device * dev)
3674{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003675 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003676 int pipe;
3677
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003678 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003679
Chris Wilsona266c7d2012-04-24 22:59:44 +01003680 if (I915_HAS_HOTPLUG(dev)) {
3681 I915_WRITE(PORT_HOTPLUG_EN, 0);
3682 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3683 }
3684
Chris Wilson00d98eb2012-04-24 22:59:48 +01003685 I915_WRITE16(HWSTAM, 0xffff);
Chris Wilson55b39752012-04-24 22:59:49 +01003686 for_each_pipe(pipe) {
3687 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003688 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003689 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3690 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003691 I915_WRITE(IMR, 0xffffffff);
3692 I915_WRITE(IER, 0x0);
3693
Chris Wilsona266c7d2012-04-24 22:59:44 +01003694 I915_WRITE(IIR, I915_READ(IIR));
3695}
3696
3697static void i965_irq_preinstall(struct drm_device * dev)
3698{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003699 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003700 int pipe;
3701
Chris Wilsonadca4732012-05-11 18:01:31 +01003702 I915_WRITE(PORT_HOTPLUG_EN, 0);
3703 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003704
3705 I915_WRITE(HWSTAM, 0xeffe);
3706 for_each_pipe(pipe)
3707 I915_WRITE(PIPESTAT(pipe), 0);
3708 I915_WRITE(IMR, 0xffffffff);
3709 I915_WRITE(IER, 0x0);
3710 POSTING_READ(IER);
3711}
3712
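/*
 * i965 postinstall: unmask the always-wanted display, port and render
 * interrupts, enable the GMBUS and pipe CRC pipestat events under
 * irq_lock, program EMR (GM45 gains extra error sources), and arm
 * IMR/IER. PORT_HOTPLUG_EN is left cleared here; i915_hpd_irq_setup()
 * programs it once the HPD state has been initialised.
 */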
3713static int i965_irq_postinstall(struct drm_device *dev)
3714{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003715 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003716 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003717 u32 error_mask;
Daniel Vetterb79480b2013-06-27 17:52:10 +02003718 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003719
Chris Wilsona266c7d2012-04-24 22:59:44 +01003720 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003721 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01003722 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003723 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3724 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3725 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3726 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3727 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3728
3729 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003730 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3731 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003732 enable_mask |= I915_USER_INTERRUPT;
3733
3734 if (IS_G4X(dev))
3735 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003736
Daniel Vetterb79480b2013-06-27 17:52:10 +02003737 /* Interrupt setup is already guaranteed to be single-threaded, this is
3738 * just to make the assert_spin_locked check happy. */
3739 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Imre Deak755e9012014-02-10 18:42:47 +02003740 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3741 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3742 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterb79480b2013-06-27 17:52:10 +02003743 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003744
Chris Wilsona266c7d2012-04-24 22:59:44 +01003745 /*
3746 * Enable some error detection, note the instruction error mask
3747 * bit is reserved, so we leave it masked.
3748 */
3749 if (IS_G4X(dev)) {
3750 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3751 GM45_ERROR_MEM_PRIV |
3752 GM45_ERROR_CP_PRIV |
3753 I915_ERROR_MEMORY_REFRESH);
3754 } else {
3755 error_mask = ~(I915_ERROR_PAGE_TABLE |
3756 I915_ERROR_MEMORY_REFRESH);
3757 }
3758 I915_WRITE(EMR, error_mask);
3759
3760 I915_WRITE(IMR, dev_priv->irq_mask);
3761 I915_WRITE(IER, enable_mask);
3762 POSTING_READ(IER);
3763
Daniel Vetter20afbda2012-12-11 14:05:07 +01003764 I915_WRITE(PORT_HOTPLUG_EN, 0);
3765 POSTING_READ(PORT_HOTPLUG_EN);
3766
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003767 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003768
3769 return 0;
3770}
3771
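/*
 * Must be called with dev_priv->irq_lock held (see the assert below).
 * Rebuilds PORT_HOTPLUG_EN from the per-pin hpd_stats so that pins
 * marked as disabled stay masked off.
 */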
Egbert Eichbac56d52013-02-25 12:06:51 -05003772static void i915_hpd_irq_setup(struct drm_device *dev)
Daniel Vetter20afbda2012-12-11 14:05:07 +01003773{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003774 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eiche5868a32013-02-28 04:17:12 -05003775 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichcd569ae2013-04-16 13:36:57 +02003776 struct intel_encoder *intel_encoder;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003777 u32 hotplug_en;
3778
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003779 assert_spin_locked(&dev_priv->irq_lock);
3780
Egbert Eichbac56d52013-02-25 12:06:51 -05003781 if (I915_HAS_HOTPLUG(dev)) {
3782 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3783 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3784 /* Note HDMI and DP share hotplug bits */
Egbert Eiche5868a32013-02-28 04:17:12 -05003785 /* enable bits are the same for all generations */
Egbert Eichcd569ae2013-04-16 13:36:57 +02003786 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3787 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3788 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
Egbert Eichbac56d52013-02-25 12:06:51 -05003789 /* Programming the CRT detection parameters tends
3790 * to generate a spurious hotplug event about three
3791 * seconds later. So just do it once.
3792 */
3793 if (IS_G4X(dev))
3794 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
Daniel Vetter85fc95b2013-03-27 15:47:11 +01003795 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
Egbert Eichbac56d52013-02-25 12:06:51 -05003796 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003797
Egbert Eichbac56d52013-02-25 12:06:51 -05003798 /* Ignore TV since it's buggy */
3799 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3800 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003801}
3802
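/*
 * i965 interrupt handler. Same IIR loop as the i915 variant, but it
 * also services the BSD ring and the GMBUS event, handles the display
 * port interrupt without the I915_HAS_HOTPLUG() check, and passes the
 * pipe index straight through as the plane (no FBC plane swap as in
 * the i915 path).
 */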
Daniel Vetterff1f5252012-10-02 15:10:55 +02003803static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003804{
3805 struct drm_device *dev = (struct drm_device *) arg;
Jani Nikula2d1013d2014-03-31 14:27:17 +03003806 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003807 u32 iir, new_iir;
3808 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003809 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003810 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003811 u32 flip_mask =
3812 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3813 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003814
Chris Wilsona266c7d2012-04-24 22:59:44 +01003815 iir = I915_READ(IIR);
3816
Chris Wilsona266c7d2012-04-24 22:59:44 +01003817 for (;;) {
Ville Syrjälä501e01d2014-01-17 11:35:15 +02003818 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson2c8ba292012-04-24 22:59:46 +01003819 bool blc_event = false;
3820
Chris Wilsona266c7d2012-04-24 22:59:44 +01003821 /* Can't rely on pipestat interrupt bit in iir as it might
3822 * have been cleared after the pipestat interrupt was received.
3823 * It doesn't set the bit in iir again, but it still produces
3824 * interrupts (for non-MSI).
3825 */
3826 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3827 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02003828 i915_handle_error(dev, false,
3829 "Command parser error, iir 0x%08x",
3830 iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003831
3832 for_each_pipe(pipe) {
3833 int reg = PIPESTAT(pipe);
3834 pipe_stats[pipe] = I915_READ(reg);
3835
3836 /*
3837 * Clear the PIPE*STAT regs before the IIR
3838 */
3839 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003840 I915_WRITE(reg, pipe_stats[pipe]);
Ville Syrjälä501e01d2014-01-17 11:35:15 +02003841 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003842 }
3843 }
3844 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3845
3846 if (!irq_received)
3847 break;
3848
3849 ret = IRQ_HANDLED;
3850
3851 /* Consume port. Then clear IIR or we'll miss events */
Ville Syrjälä16c6c562014-04-01 10:54:36 +03003852 if (iir & I915_DISPLAY_PORT_INTERRUPT)
3853 i9xx_hpd_irq_handler(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003854
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003855 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003856 new_iir = I915_READ(IIR); /* Flush posted writes */
3857
Chris Wilsona266c7d2012-04-24 22:59:44 +01003858 if (iir & I915_USER_INTERRUPT)
3859 notify_ring(dev, &dev_priv->ring[RCS]);
3860 if (iir & I915_BSD_USER_INTERRUPT)
3861 notify_ring(dev, &dev_priv->ring[VCS]);
3862
Chris Wilsona266c7d2012-04-24 22:59:44 +01003863 for_each_pipe(pipe) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01003864 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003865 i915_handle_vblank(dev, pipe, pipe, iir))
3866 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003867
3868 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3869 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003870
3871 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003872 i9xx_pipe_crc_irq_handler(dev, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003873
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003874 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3875 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02003876 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003877 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003878
3879 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3880 intel_opregion_asle_intr(dev);
3881
Daniel Vetter515ac2b2012-12-01 13:53:44 +01003882 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3883 gmbus_irq_handler(dev);
3884
Chris Wilsona266c7d2012-04-24 22:59:44 +01003885 /* With MSI, interrupts are only generated when iir
3886 * transitions from zero to nonzero. If another bit got
3887 * set while we were handling the existing iir bits, then
3888 * we would never get another interrupt.
3889 *
3890 * This is fine on non-MSI as well, as if we hit this path
3891 * we avoid exiting the interrupt handler only to generate
3892 * another one.
3893 *
3894 * Note that for MSI this could cause a stray interrupt report
3895 * if an interrupt landed in the time between writing IIR and
3896 * the posting read. This should be rare enough to never
3897 * trigger the 99% of 100,000 interrupts test for disabling
3898 * stray interrupts.
3899 */
3900 iir = new_iir;
3901 }
3902
Daniel Vetterd05c6172012-04-26 23:28:09 +02003903 i915_update_dri1_breadcrumb(dev);
Chris Wilson2c8ba292012-04-24 22:59:46 +01003904
Chris Wilsona266c7d2012-04-24 22:59:44 +01003905 return ret;
3906}
3907
3908static void i965_irq_uninstall(struct drm_device * dev)
3909{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003910 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003911 int pipe;
3912
3913 if (!dev_priv)
3914 return;
3915
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003916 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003917
Chris Wilsonadca4732012-05-11 18:01:31 +01003918 I915_WRITE(PORT_HOTPLUG_EN, 0);
3919 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003920
3921 I915_WRITE(HWSTAM, 0xffffffff);
3922 for_each_pipe(pipe)
3923 I915_WRITE(PIPESTAT(pipe), 0);
3924 I915_WRITE(IMR, 0xffffffff);
3925 I915_WRITE(IER, 0x0);
3926
3927 for_each_pipe(pipe)
3928 I915_WRITE(PIPESTAT(pipe),
3929 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3930 I915_WRITE(IIR, I915_READ(IIR));
3931}
3932
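/*
 * Timer callback: walk the HPD pins, re-enable any that were marked
 * HPD_DISABLED, restore connector->polled to HPD signalling where no
 * explicit polling mode is set, and reprogram the hotplug hardware via
 * the hpd_irq_setup hook, all under irq_lock.
 */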
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003933static void intel_hpd_irq_reenable(unsigned long data)
Egbert Eichac4c16c2013-04-16 13:36:58 +02003934{
Jani Nikula2d1013d2014-03-31 14:27:17 +03003935 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
Egbert Eichac4c16c2013-04-16 13:36:58 +02003936 struct drm_device *dev = dev_priv->dev;
3937 struct drm_mode_config *mode_config = &dev->mode_config;
3938 unsigned long irqflags;
3939 int i;
3940
3941 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3942 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3943 struct drm_connector *connector;
3944
3945 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3946 continue;
3947
3948 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3949
3950 list_for_each_entry(connector, &mode_config->connector_list, head) {
3951 struct intel_connector *intel_connector = to_intel_connector(connector);
3952
3953 if (intel_connector->encoder->hpd_pin == i) {
3954 if (connector->polled != intel_connector->polled)
3955 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3956 drm_get_connector_name(connector));
3957 connector->polled = intel_connector->polled;
3958 if (!connector->polled)
3959 connector->polled = DRM_CONNECTOR_POLL_HPD;
3960 }
3961 }
3962 }
3963 if (dev_priv->display.hpd_irq_setup)
3964 dev_priv->display.hpd_irq_setup(dev);
3965 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3966}
3967
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003968void intel_irq_init(struct drm_device *dev)
3969{
Chris Wilson8b2e3262012-04-24 22:59:41 +01003970 struct drm_i915_private *dev_priv = dev->dev_private;
3971
3972 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01003973 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003974 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01003975 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01003976
Deepak Sa6706b42014-03-15 20:23:22 +05303977 /* Let's track the enabled rps events */
3978 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
3979
Daniel Vetter99584db2012-11-14 17:14:04 +01003980 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3981 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01003982 (unsigned long) dev);
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003983 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
Egbert Eichac4c16c2013-04-16 13:36:58 +02003984 (unsigned long) dev_priv);
Daniel Vetter61bac782012-12-01 21:03:21 +01003985
Tomas Janousek97a19a22012-12-08 13:48:13 +01003986 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01003987
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +03003988 if (IS_GEN2(dev)) {
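        /* Gen2 has no hardware frame counter */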
3989 dev->max_vblank_count = 0;
3990 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3991 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003992 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3993 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
Ville Syrjälä391f75e2013-09-25 19:55:26 +03003994 } else {
3995 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3996 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003997 }
3998
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03003999 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
Keith Packardc3613de2011-08-12 17:05:54 -07004000 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03004001 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4002 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004003
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07004004 if (IS_VALLEYVIEW(dev)) {
4005 dev->driver->irq_handler = valleyview_irq_handler;
4006 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4007 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4008 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4009 dev->driver->enable_vblank = valleyview_enable_vblank;
4010 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05004011 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Ben Widawskyabd58f02013-11-02 21:07:09 -07004012 } else if (IS_GEN8(dev)) {
4013 dev->driver->irq_handler = gen8_irq_handler;
4014 dev->driver->irq_preinstall = gen8_irq_preinstall;
4015 dev->driver->irq_postinstall = gen8_irq_postinstall;
4016 dev->driver->irq_uninstall = gen8_irq_uninstall;
4017 dev->driver->enable_vblank = gen8_enable_vblank;
4018 dev->driver->disable_vblank = gen8_disable_vblank;
4019 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004020 } else if (HAS_PCH_SPLIT(dev)) {
4021 dev->driver->irq_handler = ironlake_irq_handler;
4022 dev->driver->irq_preinstall = ironlake_irq_preinstall;
4023 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4024 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4025 dev->driver->enable_vblank = ironlake_enable_vblank;
4026 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01004027 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004028 } else {
Chris Wilsonc2798b12012-04-22 21:13:57 +01004029 if (INTEL_INFO(dev)->gen == 2) {
4030 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4031 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4032 dev->driver->irq_handler = i8xx_irq_handler;
4033 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004034 } else if (INTEL_INFO(dev)->gen == 3) {
4035 dev->driver->irq_preinstall = i915_irq_preinstall;
4036 dev->driver->irq_postinstall = i915_irq_postinstall;
4037 dev->driver->irq_uninstall = i915_irq_uninstall;
4038 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01004039 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004040 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01004041 dev->driver->irq_preinstall = i965_irq_preinstall;
4042 dev->driver->irq_postinstall = i965_irq_postinstall;
4043 dev->driver->irq_uninstall = i965_irq_uninstall;
4044 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05004045 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01004046 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07004047 dev->driver->enable_vblank = i915_enable_vblank;
4048 dev->driver->disable_vblank = i915_disable_vblank;
4049 }
4050}
Daniel Vetter20afbda2012-12-11 14:05:07 +01004051
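/*
 * Reset the per-pin HPD state to enabled, mark connectors that have an
 * HPD pin and no explicit polling mode as DRM_CONNECTOR_POLL_HPD, and
 * have the platform hpd_irq_setup hook program the hardware under
 * irq_lock.
 */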
4052void intel_hpd_init(struct drm_device *dev)
4053{
4054 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eich821450c2013-04-16 13:36:55 +02004055 struct drm_mode_config *mode_config = &dev->mode_config;
4056 struct drm_connector *connector;
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02004057 unsigned long irqflags;
Egbert Eich821450c2013-04-16 13:36:55 +02004058 int i;
Daniel Vetter20afbda2012-12-11 14:05:07 +01004059
Egbert Eich821450c2013-04-16 13:36:55 +02004060 for (i = 1; i < HPD_NUM_PINS; i++) {
4061 dev_priv->hpd_stats[i].hpd_cnt = 0;
4062 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4063 }
4064 list_for_each_entry(connector, &mode_config->connector_list, head) {
4065 struct intel_connector *intel_connector = to_intel_connector(connector);
4066 connector->polled = intel_connector->polled;
4067 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4068 connector->polled = DRM_CONNECTOR_POLL_HPD;
4069 }
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02004070
4071 /* Interrupt setup is already guaranteed to be single-threaded, this is
4072 * just to make the assert_spin_locked checks happy. */
4073 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004074 if (dev_priv->display.hpd_irq_setup)
4075 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02004076 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01004077}
Paulo Zanonic67a4702013-08-19 13:18:09 -03004078
Paulo Zanoni5d584b22014-03-07 20:08:15 -03004079/* Disable interrupts so we can allow runtime PM. */
Paulo Zanoni730488b2014-03-07 20:12:32 -03004080void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004081{
4082 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonic67a4702013-08-19 13:18:09 -03004083
Paulo Zanoni730488b2014-03-07 20:12:32 -03004084 dev->driver->irq_uninstall(dev);
Paulo Zanoni5d584b22014-03-07 20:08:15 -03004085 dev_priv->pm.irqs_disabled = true;
Paulo Zanonic67a4702013-08-19 13:18:09 -03004086}
4087
Paulo Zanoni5d584b22014-03-07 20:08:15 -03004088/* Restore interrupts so we can recover from runtime PM. */
Paulo Zanoni730488b2014-03-07 20:12:32 -03004089void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
Paulo Zanonic67a4702013-08-19 13:18:09 -03004090{
4091 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonic67a4702013-08-19 13:18:09 -03004092
Paulo Zanoni5d584b22014-03-07 20:08:15 -03004093 dev_priv->pm.irqs_disabled = false;
Paulo Zanoni730488b2014-03-07 20:12:32 -03004094 dev->driver->irq_preinstall(dev);
4095 dev->driver->irq_postinstall(dev);
Paulo Zanonic67a4702013-08-19 13:18:09 -03004096}