blob: a228176676b201b4b812902410c6bfba48e8f15d [file] [log] [blame]
Dave Airlie0d6aa602006-01-02 20:14:23 +11001/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 */
Dave Airlie0d6aa602006-01-02 20:14:23 +11003/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
Dave Airliebc54fd12005-06-23 22:46:46 +10006 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
Dave Airlie0d6aa602006-01-02 20:14:23 +110027 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Joe Perchesa70491c2012-03-18 13:00:11 -070029#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
Jesse Barnes63eeaf32009-06-18 16:56:52 -070031#include <linux/sysrq.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090032#include <linux/slab.h>
Damien Lespiaub2c88f52013-10-15 18:55:29 +010033#include <linux/circ_buf.h>
David Howells760285e2012-10-02 18:01:07 +010034#include <drm/drmP.h>
35#include <drm/i915_drm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include "i915_drv.h"
Chris Wilson1c5d22f2009-08-25 11:15:50 +010037#include "i915_trace.h"
Jesse Barnes79e53942008-11-07 14:24:08 -080038#include "intel_drv.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Egbert Eiche5868a32013-02-28 04:17:12 -050040static const u32 hpd_ibx[] = {
41 [HPD_CRT] = SDE_CRT_HOTPLUG,
42 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
43 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
44 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
45 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
46};
47
48static const u32 hpd_cpt[] = {
49 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
Daniel Vetter73c352a2013-03-26 22:38:43 +010050 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
Egbert Eiche5868a32013-02-28 04:17:12 -050051 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
52 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
53 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
54};
55
56static const u32 hpd_mask_i915[] = {
57 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
58 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
59 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
60 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
61 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
62 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
63};
64
65static const u32 hpd_status_gen4[] = {
66 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
67 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
68 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
69 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
70 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
71 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
72};
73
Egbert Eiche5868a32013-02-28 04:17:12 -050074static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
75 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
76 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
77 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
78 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
79 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
80 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
81};
82
Zhenyu Wang036a4a72009-06-08 14:40:19 +080083/* For display hotplug interrupt */
Chris Wilson995b6762010-08-20 13:23:26 +010084static void
Adam Jacksonf2b115e2009-12-03 17:14:42 -050085ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
Zhenyu Wang036a4a72009-06-08 14:40:19 +080086{
Daniel Vetter4bc9d432013-06-27 13:44:58 +020087 assert_spin_locked(&dev_priv->irq_lock);
88
Paulo Zanonic67a4702013-08-19 13:18:09 -030089 if (dev_priv->pc8.irqs_disabled) {
90 WARN(1, "IRQs disabled\n");
91 dev_priv->pc8.regsave.deimr &= ~mask;
92 return;
93 }
94
Chris Wilson1ec14ad2010-12-04 11:30:53 +000095 if ((dev_priv->irq_mask & mask) != 0) {
96 dev_priv->irq_mask &= ~mask;
97 I915_WRITE(DEIMR, dev_priv->irq_mask);
Chris Wilson3143a2b2010-11-16 15:55:10 +000098 POSTING_READ(DEIMR);
Zhenyu Wang036a4a72009-06-08 14:40:19 +080099 }
100}
101
Paulo Zanoni0ff98002013-02-22 17:05:31 -0300102static void
Adam Jacksonf2b115e2009-12-03 17:14:42 -0500103ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
Zhenyu Wang036a4a72009-06-08 14:40:19 +0800104{
Daniel Vetter4bc9d432013-06-27 13:44:58 +0200105 assert_spin_locked(&dev_priv->irq_lock);
106
Paulo Zanonic67a4702013-08-19 13:18:09 -0300107 if (dev_priv->pc8.irqs_disabled) {
108 WARN(1, "IRQs disabled\n");
109 dev_priv->pc8.regsave.deimr |= mask;
110 return;
111 }
112
Chris Wilson1ec14ad2010-12-04 11:30:53 +0000113 if ((dev_priv->irq_mask & mask) != mask) {
114 dev_priv->irq_mask |= mask;
115 I915_WRITE(DEIMR, dev_priv->irq_mask);
Chris Wilson3143a2b2010-11-16 15:55:10 +0000116 POSTING_READ(DEIMR);
Zhenyu Wang036a4a72009-06-08 14:40:19 +0800117 }
118}
119
Paulo Zanoni43eaea12013-08-06 18:57:12 -0300120/**
121 * ilk_update_gt_irq - update GTIMR
122 * @dev_priv: driver private
123 * @interrupt_mask: mask of interrupt bits to update
124 * @enabled_irq_mask: mask of interrupt bits to enable
125 */
126static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
127 uint32_t interrupt_mask,
128 uint32_t enabled_irq_mask)
129{
130 assert_spin_locked(&dev_priv->irq_lock);
131
Paulo Zanonic67a4702013-08-19 13:18:09 -0300132 if (dev_priv->pc8.irqs_disabled) {
133 WARN(1, "IRQs disabled\n");
134 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
135 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
136 interrupt_mask);
137 return;
138 }
139
Paulo Zanoni43eaea12013-08-06 18:57:12 -0300140 dev_priv->gt_irq_mask &= ~interrupt_mask;
141 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
142 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
143 POSTING_READ(GTIMR);
144}
145
146void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
147{
148 ilk_update_gt_irq(dev_priv, mask, mask);
149}
150
151void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
152{
153 ilk_update_gt_irq(dev_priv, mask, 0);
154}
155
Paulo Zanoniedbfdb42013-08-06 18:57:13 -0300156/**
157 * snb_update_pm_irq - update GEN6_PMIMR
158 * @dev_priv: driver private
159 * @interrupt_mask: mask of interrupt bits to update
160 * @enabled_irq_mask: mask of interrupt bits to enable
161 */
162static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
163 uint32_t interrupt_mask,
164 uint32_t enabled_irq_mask)
165{
Paulo Zanoni605cd252013-08-06 18:57:15 -0300166 uint32_t new_val;
Paulo Zanoniedbfdb42013-08-06 18:57:13 -0300167
168 assert_spin_locked(&dev_priv->irq_lock);
169
Paulo Zanonic67a4702013-08-19 13:18:09 -0300170 if (dev_priv->pc8.irqs_disabled) {
171 WARN(1, "IRQs disabled\n");
172 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
173 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
174 interrupt_mask);
175 return;
176 }
177
Paulo Zanoni605cd252013-08-06 18:57:15 -0300178 new_val = dev_priv->pm_irq_mask;
Paulo Zanonif52ecbc2013-08-06 18:57:14 -0300179 new_val &= ~interrupt_mask;
180 new_val |= (~enabled_irq_mask & interrupt_mask);
181
Paulo Zanoni605cd252013-08-06 18:57:15 -0300182 if (new_val != dev_priv->pm_irq_mask) {
183 dev_priv->pm_irq_mask = new_val;
184 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
Paulo Zanonif52ecbc2013-08-06 18:57:14 -0300185 POSTING_READ(GEN6_PMIMR);
186 }
Paulo Zanoniedbfdb42013-08-06 18:57:13 -0300187}
188
189void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
190{
191 snb_update_pm_irq(dev_priv, mask, mask);
192}
193
194void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
195{
196 snb_update_pm_irq(dev_priv, mask, 0);
197}
198
Paulo Zanoni86642812013-04-12 17:57:57 -0300199static bool ivb_can_enable_err_int(struct drm_device *dev)
200{
201 struct drm_i915_private *dev_priv = dev->dev_private;
202 struct intel_crtc *crtc;
203 enum pipe pipe;
204
Daniel Vetter4bc9d432013-06-27 13:44:58 +0200205 assert_spin_locked(&dev_priv->irq_lock);
206
Paulo Zanoni86642812013-04-12 17:57:57 -0300207 for_each_pipe(pipe) {
208 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
209
210 if (crtc->cpu_fifo_underrun_disabled)
211 return false;
212 }
213
214 return true;
215}
216
217static bool cpt_can_enable_serr_int(struct drm_device *dev)
218{
219 struct drm_i915_private *dev_priv = dev->dev_private;
220 enum pipe pipe;
221 struct intel_crtc *crtc;
222
Daniel Vetterfee884e2013-07-04 23:35:21 +0200223 assert_spin_locked(&dev_priv->irq_lock);
224
Paulo Zanoni86642812013-04-12 17:57:57 -0300225 for_each_pipe(pipe) {
226 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
227
228 if (crtc->pch_fifo_underrun_disabled)
229 return false;
230 }
231
232 return true;
233}
234
235static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
236 enum pipe pipe, bool enable)
237{
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
240 DE_PIPEB_FIFO_UNDERRUN;
241
242 if (enable)
243 ironlake_enable_display_irq(dev_priv, bit);
244 else
245 ironlake_disable_display_irq(dev_priv, bit);
246}
247
248static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
Daniel Vetter7336df62013-07-09 22:59:16 +0200249 enum pipe pipe, bool enable)
Paulo Zanoni86642812013-04-12 17:57:57 -0300250{
251 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni86642812013-04-12 17:57:57 -0300252 if (enable) {
Daniel Vetter7336df62013-07-09 22:59:16 +0200253 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
254
Paulo Zanoni86642812013-04-12 17:57:57 -0300255 if (!ivb_can_enable_err_int(dev))
256 return;
257
Paulo Zanoni86642812013-04-12 17:57:57 -0300258 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
259 } else {
Daniel Vetter7336df62013-07-09 22:59:16 +0200260 bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
261
262 /* Change the state _after_ we've read out the current one. */
Paulo Zanoni86642812013-04-12 17:57:57 -0300263 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
Daniel Vetter7336df62013-07-09 22:59:16 +0200264
265 if (!was_enabled &&
266 (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
267 DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
268 pipe_name(pipe));
269 }
Paulo Zanoni86642812013-04-12 17:57:57 -0300270 }
271}
272
Daniel Vetterfee884e2013-07-04 23:35:21 +0200273/**
274 * ibx_display_interrupt_update - update SDEIMR
275 * @dev_priv: driver private
276 * @interrupt_mask: mask of interrupt bits to update
277 * @enabled_irq_mask: mask of interrupt bits to enable
278 */
279static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
280 uint32_t interrupt_mask,
281 uint32_t enabled_irq_mask)
282{
283 uint32_t sdeimr = I915_READ(SDEIMR);
284 sdeimr &= ~interrupt_mask;
285 sdeimr |= (~enabled_irq_mask & interrupt_mask);
286
287 assert_spin_locked(&dev_priv->irq_lock);
288
Paulo Zanonic67a4702013-08-19 13:18:09 -0300289 if (dev_priv->pc8.irqs_disabled &&
290 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
291 WARN(1, "IRQs disabled\n");
292 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
293 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
294 interrupt_mask);
295 return;
296 }
297
Daniel Vetterfee884e2013-07-04 23:35:21 +0200298 I915_WRITE(SDEIMR, sdeimr);
299 POSTING_READ(SDEIMR);
300}
301#define ibx_enable_display_interrupt(dev_priv, bits) \
302 ibx_display_interrupt_update((dev_priv), (bits), (bits))
303#define ibx_disable_display_interrupt(dev_priv, bits) \
304 ibx_display_interrupt_update((dev_priv), (bits), 0)
305
Daniel Vetterde280752013-07-04 23:35:24 +0200306static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
307 enum transcoder pch_transcoder,
Paulo Zanoni86642812013-04-12 17:57:57 -0300308 bool enable)
309{
Paulo Zanoni86642812013-04-12 17:57:57 -0300310 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterde280752013-07-04 23:35:24 +0200311 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
312 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
Paulo Zanoni86642812013-04-12 17:57:57 -0300313
314 if (enable)
Daniel Vetterfee884e2013-07-04 23:35:21 +0200315 ibx_enable_display_interrupt(dev_priv, bit);
Paulo Zanoni86642812013-04-12 17:57:57 -0300316 else
Daniel Vetterfee884e2013-07-04 23:35:21 +0200317 ibx_disable_display_interrupt(dev_priv, bit);
Paulo Zanoni86642812013-04-12 17:57:57 -0300318}
319
320static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
321 enum transcoder pch_transcoder,
322 bool enable)
323{
324 struct drm_i915_private *dev_priv = dev->dev_private;
325
326 if (enable) {
Daniel Vetter1dd246f2013-07-10 08:30:23 +0200327 I915_WRITE(SERR_INT,
328 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
329
Paulo Zanoni86642812013-04-12 17:57:57 -0300330 if (!cpt_can_enable_serr_int(dev))
331 return;
332
Daniel Vetterfee884e2013-07-04 23:35:21 +0200333 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
Paulo Zanoni86642812013-04-12 17:57:57 -0300334 } else {
Daniel Vetter1dd246f2013-07-10 08:30:23 +0200335 uint32_t tmp = I915_READ(SERR_INT);
336 bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
337
338 /* Change the state _after_ we've read out the current one. */
Daniel Vetterfee884e2013-07-04 23:35:21 +0200339 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
Daniel Vetter1dd246f2013-07-10 08:30:23 +0200340
341 if (!was_enabled &&
342 (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
343 DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
344 transcoder_name(pch_transcoder));
345 }
Paulo Zanoni86642812013-04-12 17:57:57 -0300346 }
Paulo Zanoni86642812013-04-12 17:57:57 -0300347}
348
349/**
350 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
351 * @dev: drm device
352 * @pipe: pipe
353 * @enable: true if we want to report FIFO underrun errors, false otherwise
354 *
355 * This function makes us disable or enable CPU fifo underruns for a specific
356 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
357 * reporting for one pipe may also disable all the other CPU error interruts for
358 * the other pipes, due to the fact that there's just one interrupt mask/enable
359 * bit for all the pipes.
360 *
361 * Returns the previous state of underrun reporting.
362 */
363bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
364 enum pipe pipe, bool enable)
365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
369 unsigned long flags;
370 bool ret;
371
372 spin_lock_irqsave(&dev_priv->irq_lock, flags);
373
374 ret = !intel_crtc->cpu_fifo_underrun_disabled;
375
376 if (enable == ret)
377 goto done;
378
379 intel_crtc->cpu_fifo_underrun_disabled = !enable;
380
381 if (IS_GEN5(dev) || IS_GEN6(dev))
382 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
383 else if (IS_GEN7(dev))
Daniel Vetter7336df62013-07-09 22:59:16 +0200384 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
Paulo Zanoni86642812013-04-12 17:57:57 -0300385
386done:
387 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
388 return ret;
389}
390
391/**
392 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
393 * @dev: drm device
394 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
395 * @enable: true if we want to report FIFO underrun errors, false otherwise
396 *
397 * This function makes us disable or enable PCH fifo underruns for a specific
398 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
399 * underrun reporting for one transcoder may also disable all the other PCH
400 * error interruts for the other transcoders, due to the fact that there's just
401 * one interrupt mask/enable bit for all the transcoders.
402 *
403 * Returns the previous state of underrun reporting.
404 */
405bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
406 enum transcoder pch_transcoder,
407 bool enable)
408{
409 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterde280752013-07-04 23:35:24 +0200410 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
411 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Paulo Zanoni86642812013-04-12 17:57:57 -0300412 unsigned long flags;
413 bool ret;
414
Daniel Vetterde280752013-07-04 23:35:24 +0200415 /*
416 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
417 * has only one pch transcoder A that all pipes can use. To avoid racy
418 * pch transcoder -> pipe lookups from interrupt code simply store the
419 * underrun statistics in crtc A. Since we never expose this anywhere
420 * nor use it outside of the fifo underrun code here using the "wrong"
421 * crtc on LPT won't cause issues.
422 */
Paulo Zanoni86642812013-04-12 17:57:57 -0300423
424 spin_lock_irqsave(&dev_priv->irq_lock, flags);
425
426 ret = !intel_crtc->pch_fifo_underrun_disabled;
427
428 if (enable == ret)
429 goto done;
430
431 intel_crtc->pch_fifo_underrun_disabled = !enable;
432
433 if (HAS_PCH_IBX(dev))
Daniel Vetterde280752013-07-04 23:35:24 +0200434 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
Paulo Zanoni86642812013-04-12 17:57:57 -0300435 else
436 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
437
438done:
439 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
440 return ret;
441}
442
443
Keith Packard7c463582008-11-04 02:03:27 -0800444void
Daniel Vetter3b6c42e2013-10-21 18:04:35 +0200445i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
Keith Packard7c463582008-11-04 02:03:27 -0800446{
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200447 u32 reg = PIPESTAT(pipe);
448 u32 pipestat = I915_READ(reg) & 0x7fff0000;
Keith Packard7c463582008-11-04 02:03:27 -0800449
Daniel Vetterb79480b2013-06-27 17:52:10 +0200450 assert_spin_locked(&dev_priv->irq_lock);
451
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200452 if ((pipestat & mask) == mask)
453 return;
454
455 /* Enable the interrupt, clear any pending status */
456 pipestat |= mask | (mask >> 16);
457 I915_WRITE(reg, pipestat);
458 POSTING_READ(reg);
Keith Packard7c463582008-11-04 02:03:27 -0800459}
460
461void
Daniel Vetter3b6c42e2013-10-21 18:04:35 +0200462i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
Keith Packard7c463582008-11-04 02:03:27 -0800463{
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200464 u32 reg = PIPESTAT(pipe);
465 u32 pipestat = I915_READ(reg) & 0x7fff0000;
Keith Packard7c463582008-11-04 02:03:27 -0800466
Daniel Vetterb79480b2013-06-27 17:52:10 +0200467 assert_spin_locked(&dev_priv->irq_lock);
468
Ville Syrjälä46c06a32013-02-20 21:16:18 +0200469 if ((pipestat & mask) == 0)
470 return;
471
472 pipestat &= ~mask;
473 I915_WRITE(reg, pipestat);
474 POSTING_READ(reg);
Keith Packard7c463582008-11-04 02:03:27 -0800475}
476
=?utf-8?q?Michel_D=C3=A4nzer?=a6b54f32006-10-24 23:37:43 +1000477/**
Jani Nikulaf49e38d2013-04-29 13:02:54 +0300478 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
Zhao Yakui01c66882009-10-28 05:10:00 +0000479 */
Jani Nikulaf49e38d2013-04-29 13:02:54 +0300480static void i915_enable_asle_pipestat(struct drm_device *dev)
Zhao Yakui01c66882009-10-28 05:10:00 +0000481{
Chris Wilson1ec14ad2010-12-04 11:30:53 +0000482 drm_i915_private_t *dev_priv = dev->dev_private;
483 unsigned long irqflags;
484
Jani Nikulaf49e38d2013-04-29 13:02:54 +0300485 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
486 return;
487
Chris Wilson1ec14ad2010-12-04 11:30:53 +0000488 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Zhao Yakui01c66882009-10-28 05:10:00 +0000489
Daniel Vetter3b6c42e2013-10-21 18:04:35 +0200490 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
Jani Nikulaf8987802013-04-29 13:02:53 +0300491 if (INTEL_INFO(dev)->gen >= 4)
Daniel Vetter3b6c42e2013-10-21 18:04:35 +0200492 i915_enable_pipestat(dev_priv, PIPE_A,
493 PIPE_LEGACY_BLC_EVENT_ENABLE);
Chris Wilson1ec14ad2010-12-04 11:30:53 +0000494
495 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Zhao Yakui01c66882009-10-28 05:10:00 +0000496}
497
498/**
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700499 * i915_pipe_enabled - check if a pipe is enabled
500 * @dev: DRM device
501 * @pipe: pipe to check
502 *
503 * Reading certain registers when the pipe is disabled can hang the chip.
504 * Use this routine to make sure the PLL is running and the pipe is active
505 * before reading such registers if unsure.
506 */
507static int
508i915_pipe_enabled(struct drm_device *dev, int pipe)
509{
510 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanoni702e7a52012-10-23 18:29:59 -0200511
Daniel Vettera01025a2013-05-22 00:50:23 +0200512 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
513 /* Locking is horribly broken here, but whatever. */
514 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
515 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Paulo Zanoni71f8ba62013-05-03 12:15:39 -0300516
Daniel Vettera01025a2013-05-22 00:50:23 +0200517 return intel_crtc->active;
518 } else {
519 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
520 }
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700521}
522
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +0300523static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
524{
525 /* Gen2 doesn't have a hardware frame counter */
526 return 0;
527}
528
Keith Packard42f52ef2008-10-18 19:39:29 -0700529/* Called from drm generic code, passed a 'crtc', which
530 * we use as a pipe index
531 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -0700532static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700533{
534 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
535 unsigned long high_frame;
536 unsigned long low_frame;
Ville Syrjälä391f75e2013-09-25 19:55:26 +0300537 u32 high1, high2, low, pixel, vbl_start;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700538
539 if (!i915_pipe_enabled(dev, pipe)) {
Zhao Yakui44d98a62009-10-09 11:39:40 +0800540 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800541 "pipe %c\n", pipe_name(pipe));
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700542 return 0;
543 }
544
Ville Syrjälä391f75e2013-09-25 19:55:26 +0300545 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
546 struct intel_crtc *intel_crtc =
547 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
548 const struct drm_display_mode *mode =
549 &intel_crtc->config.adjusted_mode;
550
551 vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
552 } else {
553 enum transcoder cpu_transcoder =
554 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
555 u32 htotal;
556
557 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
558 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
559
560 vbl_start *= htotal;
561 }
562
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800563 high_frame = PIPEFRAME(pipe);
564 low_frame = PIPEFRAMEPIXEL(pipe);
Chris Wilson5eddb702010-09-11 13:48:45 +0100565
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700566 /*
567 * High & low register fields aren't synchronized, so make sure
568 * we get a low value that's stable across two reads of the high
569 * register.
570 */
571 do {
Chris Wilson5eddb702010-09-11 13:48:45 +0100572 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
Ville Syrjälä391f75e2013-09-25 19:55:26 +0300573 low = I915_READ(low_frame);
Chris Wilson5eddb702010-09-11 13:48:45 +0100574 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700575 } while (high1 != high2);
576
Chris Wilson5eddb702010-09-11 13:48:45 +0100577 high1 >>= PIPE_FRAME_HIGH_SHIFT;
Ville Syrjälä391f75e2013-09-25 19:55:26 +0300578 pixel = low & PIPE_PIXEL_MASK;
Chris Wilson5eddb702010-09-11 13:48:45 +0100579 low >>= PIPE_FRAME_LOW_SHIFT;
Ville Syrjälä391f75e2013-09-25 19:55:26 +0300580
581 /*
582 * The frame counter increments at beginning of active.
583 * Cook up a vblank counter by also checking the pixel
584 * counter against vblank start.
585 */
586 return ((high1 << 8) | low) + (pixel >= vbl_start);
Jesse Barnes0a3e67a2008-09-30 12:14:26 -0700587}
588
Jesse Barnesf71d4af2011-06-28 13:00:41 -0700589static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
Jesse Barnes9880b7a2009-02-06 10:22:41 -0800590{
591 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800592 int reg = PIPE_FRMCOUNT_GM45(pipe);
Jesse Barnes9880b7a2009-02-06 10:22:41 -0800593
594 if (!i915_pipe_enabled(dev, pipe)) {
Zhao Yakui44d98a62009-10-09 11:39:40 +0800595 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800596 "pipe %c\n", pipe_name(pipe));
Jesse Barnes9880b7a2009-02-06 10:22:41 -0800597 return 0;
598 }
599
600 return I915_READ(reg);
601}
602
Ville Syrjälä7c06b082013-10-11 21:52:43 +0300603static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
Ville Syrjälä54ddcbd2013-09-23 13:02:07 +0300604{
605 struct drm_i915_private *dev_priv = dev->dev_private;
606 uint32_t status;
607
608 if (IS_VALLEYVIEW(dev)) {
609 status = pipe == PIPE_A ?
610 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
611 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
612
613 return I915_READ(VLV_ISR) & status;
Ville Syrjälä7c06b082013-10-11 21:52:43 +0300614 } else if (IS_GEN2(dev)) {
615 status = pipe == PIPE_A ?
616 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
617 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
618
619 return I915_READ16(ISR) & status;
620 } else if (INTEL_INFO(dev)->gen < 5) {
Ville Syrjälä54ddcbd2013-09-23 13:02:07 +0300621 status = pipe == PIPE_A ?
622 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
623 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
624
625 return I915_READ(ISR) & status;
626 } else if (INTEL_INFO(dev)->gen < 7) {
627 status = pipe == PIPE_A ?
628 DE_PIPEA_VBLANK :
629 DE_PIPEB_VBLANK;
630
631 return I915_READ(DEISR) & status;
632 } else {
633 switch (pipe) {
634 default:
635 case PIPE_A:
636 status = DE_PIPEA_VBLANK_IVB;
637 break;
638 case PIPE_B:
639 status = DE_PIPEB_VBLANK_IVB;
640 break;
641 case PIPE_C:
642 status = DE_PIPEC_VBLANK_IVB;
643 break;
644 }
645
646 return I915_READ(DEISR) & status;
647 }
648}
649
Jesse Barnesf71d4af2011-06-28 13:00:41 -0700650static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100651 int *vpos, int *hpos)
652{
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +0300653 struct drm_i915_private *dev_priv = dev->dev_private;
654 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
655 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
656 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
Ville Syrjälä3aa18df2013-10-11 19:10:32 +0300657 int position;
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100658 int vbl_start, vbl_end, htotal, vtotal;
659 bool in_vbl = true;
660 int ret = 0;
661
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +0300662 if (!intel_crtc->active) {
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100663 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800664 "pipe %c\n", pipe_name(pipe));
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100665 return 0;
666 }
667
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +0300668 htotal = mode->crtc_htotal;
669 vtotal = mode->crtc_vtotal;
670 vbl_start = mode->crtc_vblank_start;
671 vbl_end = mode->crtc_vblank_end;
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100672
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +0300673 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
674
Ville Syrjälä7c06b082013-10-11 21:52:43 +0300675 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100676 /* No obvious pixelcount register. Only query vertical
677 * scanout position from Display scan line register.
678 */
Ville Syrjälä7c06b082013-10-11 21:52:43 +0300679 if (IS_GEN2(dev))
680 position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
681 else
682 position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
Ville Syrjälä54ddcbd2013-09-23 13:02:07 +0300683
684 /*
685 * The scanline counter increments at the leading edge
686 * of hsync, ie. it completely misses the active portion
687 * of the line. Fix up the counter at both edges of vblank
688 * to get a more accurate picture whether we're in vblank
689 * or not.
690 */
Ville Syrjälä7c06b082013-10-11 21:52:43 +0300691 in_vbl = intel_pipe_in_vblank(dev, pipe);
Ville Syrjälä54ddcbd2013-09-23 13:02:07 +0300692 if ((in_vbl && position == vbl_start - 1) ||
693 (!in_vbl && position == vbl_end - 1))
694 position = (position + 1) % vtotal;
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100695 } else {
696 /* Have access to pixelcount since start of frame.
697 * We can split this into vertical and horizontal
698 * scanout position.
699 */
700 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
701
Ville Syrjälä3aa18df2013-10-11 19:10:32 +0300702 /* convert to pixel counts */
703 vbl_start *= htotal;
704 vbl_end *= htotal;
705 vtotal *= htotal;
706 }
707
708 in_vbl = position >= vbl_start && position < vbl_end;
709
710 /*
711 * While in vblank, position will be negative
712 * counting up towards 0 at vbl_end. And outside
713 * vblank, position will be positive counting
714 * up since vbl_end.
715 */
716 if (position >= vbl_start)
717 position -= vbl_end;
718 else
719 position += vtotal - vbl_end;
720
Ville Syrjälä7c06b082013-10-11 21:52:43 +0300721 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Ville Syrjälä3aa18df2013-10-11 19:10:32 +0300722 *vpos = position;
723 *hpos = 0;
724 } else {
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100725 *vpos = position / htotal;
726 *hpos = position - (*vpos * htotal);
727 }
728
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100729 /* In vblank? */
730 if (in_vbl)
731 ret |= DRM_SCANOUTPOS_INVBL;
732
733 return ret;
734}
735
Jesse Barnesf71d4af2011-06-28 13:00:41 -0700736static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100737 int *max_error,
738 struct timeval *vblank_time,
739 unsigned flags)
740{
Chris Wilson4041b852011-01-22 10:07:56 +0000741 struct drm_crtc *crtc;
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100742
Ben Widawsky7eb552a2013-03-13 14:05:41 -0700743 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
Chris Wilson4041b852011-01-22 10:07:56 +0000744 DRM_ERROR("Invalid crtc %d\n", pipe);
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100745 return -EINVAL;
746 }
747
748 /* Get drm_crtc to timestamp: */
Chris Wilson4041b852011-01-22 10:07:56 +0000749 crtc = intel_get_crtc_for_pipe(dev, pipe);
750 if (crtc == NULL) {
751 DRM_ERROR("Invalid crtc %d\n", pipe);
752 return -EINVAL;
753 }
754
755 if (!crtc->enabled) {
756 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
757 return -EBUSY;
758 }
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100759
760 /* Helper routine in DRM core does all the work: */
Chris Wilson4041b852011-01-22 10:07:56 +0000761 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
762 vblank_time, flags,
763 crtc);
Mario Kleiner0af7e4d2010-12-08 04:07:19 +0100764}
765
Jani Nikula67c347f2013-09-17 14:26:34 +0300766static bool intel_hpd_irq_event(struct drm_device *dev,
767 struct drm_connector *connector)
Egbert Eich321a1b32013-04-11 16:00:26 +0200768{
769 enum drm_connector_status old_status;
770
771 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
772 old_status = connector->status;
773
774 connector->status = connector->funcs->detect(connector, false);
Jani Nikula67c347f2013-09-17 14:26:34 +0300775 if (old_status == connector->status)
776 return false;
777
778 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
Egbert Eich321a1b32013-04-11 16:00:26 +0200779 connector->base.id,
780 drm_get_connector_name(connector),
Jani Nikula67c347f2013-09-17 14:26:34 +0300781 drm_get_connector_status_name(old_status),
782 drm_get_connector_status_name(connector->status));
783
784 return true;
Egbert Eich321a1b32013-04-11 16:00:26 +0200785}
786
Jesse Barnes5ca58282009-03-31 14:11:15 -0700787/*
788 * Handle hotplug events outside the interrupt handler proper.
789 */
Egbert Eichac4c16c2013-04-16 13:36:58 +0200790#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
791
Jesse Barnes5ca58282009-03-31 14:11:15 -0700792static void i915_hotplug_work_func(struct work_struct *work)
793{
794 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
795 hotplug_work);
796 struct drm_device *dev = dev_priv->dev;
Keith Packardc31c4ba2009-05-06 11:48:58 -0700797 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichcd569ae2013-04-16 13:36:57 +0200798 struct intel_connector *intel_connector;
799 struct intel_encoder *intel_encoder;
800 struct drm_connector *connector;
801 unsigned long irqflags;
802 bool hpd_disabled = false;
Egbert Eich321a1b32013-04-11 16:00:26 +0200803 bool changed = false;
Egbert Eich142e2392013-04-11 15:57:57 +0200804 u32 hpd_event_bits;
Jesse Barnes5ca58282009-03-31 14:11:15 -0700805
Daniel Vetter52d7ece2012-12-01 21:03:22 +0100806 /* HPD irq before everything is fully set up. */
807 if (!dev_priv->enable_hotplug_processing)
808 return;
809
Keith Packarda65e34c2011-07-25 10:04:56 -0700810 mutex_lock(&mode_config->mutex);
Jesse Barnese67189ab2011-02-11 14:44:51 -0800811 DRM_DEBUG_KMS("running encoder hotplug functions\n");
812
Egbert Eichcd569ae2013-04-16 13:36:57 +0200813 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Egbert Eich142e2392013-04-11 15:57:57 +0200814
815 hpd_event_bits = dev_priv->hpd_event_bits;
816 dev_priv->hpd_event_bits = 0;
Egbert Eichcd569ae2013-04-16 13:36:57 +0200817 list_for_each_entry(connector, &mode_config->connector_list, head) {
818 intel_connector = to_intel_connector(connector);
819 intel_encoder = intel_connector->encoder;
820 if (intel_encoder->hpd_pin > HPD_NONE &&
821 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
822 connector->polled == DRM_CONNECTOR_POLL_HPD) {
823 DRM_INFO("HPD interrupt storm detected on connector %s: "
824 "switching from hotplug detection to polling\n",
825 drm_get_connector_name(connector));
826 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
827 connector->polled = DRM_CONNECTOR_POLL_CONNECT
828 | DRM_CONNECTOR_POLL_DISCONNECT;
829 hpd_disabled = true;
830 }
Egbert Eich142e2392013-04-11 15:57:57 +0200831 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
832 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
833 drm_get_connector_name(connector), intel_encoder->hpd_pin);
834 }
Egbert Eichcd569ae2013-04-16 13:36:57 +0200835 }
836 /* if there were no outputs to poll, poll was disabled,
837 * therefore make sure it's enabled when disabling HPD on
838 * some connectors */
Egbert Eichac4c16c2013-04-16 13:36:58 +0200839 if (hpd_disabled) {
Egbert Eichcd569ae2013-04-16 13:36:57 +0200840 drm_kms_helper_poll_enable(dev);
Egbert Eichac4c16c2013-04-16 13:36:58 +0200841 mod_timer(&dev_priv->hotplug_reenable_timer,
842 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
843 }
Egbert Eichcd569ae2013-04-16 13:36:57 +0200844
845 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
846
Egbert Eich321a1b32013-04-11 16:00:26 +0200847 list_for_each_entry(connector, &mode_config->connector_list, head) {
848 intel_connector = to_intel_connector(connector);
849 intel_encoder = intel_connector->encoder;
850 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
851 if (intel_encoder->hot_plug)
852 intel_encoder->hot_plug(intel_encoder);
853 if (intel_hpd_irq_event(dev, connector))
854 changed = true;
855 }
856 }
Keith Packard40ee3382011-07-28 15:31:19 -0700857 mutex_unlock(&mode_config->mutex);
858
Egbert Eich321a1b32013-04-11 16:00:26 +0200859 if (changed)
860 drm_kms_helper_hotplug_event(dev);
Jesse Barnes5ca58282009-03-31 14:11:15 -0700861}
862
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +0200863static void ironlake_rps_change_irq_handler(struct drm_device *dev)
Jesse Barnesf97108d2010-01-29 11:27:07 -0800864{
865 drm_i915_private_t *dev_priv = dev->dev_private;
Matthew Garrettb5b72e82010-02-02 18:30:47 +0000866 u32 busy_up, busy_down, max_avg, min_avg;
Daniel Vetter92703882012-08-09 16:46:01 +0200867 u8 new_delay;
Daniel Vetter92703882012-08-09 16:46:01 +0200868
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +0200869 spin_lock(&mchdev_lock);
Jesse Barnesf97108d2010-01-29 11:27:07 -0800870
Daniel Vetter73edd18f2012-08-08 23:35:37 +0200871 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
872
Daniel Vetter20e4d402012-08-08 23:35:39 +0200873 new_delay = dev_priv->ips.cur_delay;
Daniel Vetter92703882012-08-09 16:46:01 +0200874
Jesse Barnes7648fa92010-05-20 14:28:11 -0700875 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
Matthew Garrettb5b72e82010-02-02 18:30:47 +0000876 busy_up = I915_READ(RCPREVBSYTUPAVG);
877 busy_down = I915_READ(RCPREVBSYTDNAVG);
Jesse Barnesf97108d2010-01-29 11:27:07 -0800878 max_avg = I915_READ(RCBMAXAVG);
879 min_avg = I915_READ(RCBMINAVG);
880
881 /* Handle RCS change request from hw */
Matthew Garrettb5b72e82010-02-02 18:30:47 +0000882 if (busy_up > max_avg) {
Daniel Vetter20e4d402012-08-08 23:35:39 +0200883 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
884 new_delay = dev_priv->ips.cur_delay - 1;
885 if (new_delay < dev_priv->ips.max_delay)
886 new_delay = dev_priv->ips.max_delay;
Matthew Garrettb5b72e82010-02-02 18:30:47 +0000887 } else if (busy_down < min_avg) {
Daniel Vetter20e4d402012-08-08 23:35:39 +0200888 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
889 new_delay = dev_priv->ips.cur_delay + 1;
890 if (new_delay > dev_priv->ips.min_delay)
891 new_delay = dev_priv->ips.min_delay;
Jesse Barnesf97108d2010-01-29 11:27:07 -0800892 }
893
Jesse Barnes7648fa92010-05-20 14:28:11 -0700894 if (ironlake_set_drps(dev, new_delay))
Daniel Vetter20e4d402012-08-08 23:35:39 +0200895 dev_priv->ips.cur_delay = new_delay;
Jesse Barnesf97108d2010-01-29 11:27:07 -0800896
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +0200897 spin_unlock(&mchdev_lock);
Daniel Vetter92703882012-08-09 16:46:01 +0200898
Jesse Barnesf97108d2010-01-29 11:27:07 -0800899 return;
900}
901
Chris Wilson549f7362010-10-19 11:19:32 +0100902static void notify_ring(struct drm_device *dev,
903 struct intel_ring_buffer *ring)
904{
Chris Wilson475553d2011-01-20 09:52:56 +0000905 if (ring->obj == NULL)
906 return;
907
Chris Wilson814e9b52013-09-23 17:33:19 -0300908 trace_i915_gem_request_complete(ring);
Chris Wilson9862e602011-01-04 22:22:17 +0000909
Chris Wilson549f7362010-10-19 11:19:32 +0100910 wake_up_all(&ring->irq_queue);
Mika Kuoppala10cd45b2013-07-03 17:22:08 +0300911 i915_queue_hangcheck(dev);
Chris Wilson549f7362010-10-19 11:19:32 +0100912}
913
Ben Widawsky4912d042011-04-25 11:25:20 -0700914static void gen6_pm_rps_work(struct work_struct *work)
Jesse Barnes3b8d8d92010-12-17 14:19:02 -0800915{
Ben Widawsky4912d042011-04-25 11:25:20 -0700916 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
Daniel Vetterc6a828d2012-08-08 23:35:35 +0200917 rps.work);
Paulo Zanoniedbfdb42013-08-06 18:57:13 -0300918 u32 pm_iir;
Chris Wilsondd75fdc2013-09-25 17:34:57 +0100919 int new_delay, adj;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -0800920
Daniel Vetter59cdb632013-07-04 23:35:28 +0200921 spin_lock_irq(&dev_priv->irq_lock);
Daniel Vetterc6a828d2012-08-08 23:35:35 +0200922 pm_iir = dev_priv->rps.pm_iir;
923 dev_priv->rps.pm_iir = 0;
Ben Widawsky48484052013-05-28 19:22:27 -0700924 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
Paulo Zanoniedbfdb42013-08-06 18:57:13 -0300925 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
Daniel Vetter59cdb632013-07-04 23:35:28 +0200926 spin_unlock_irq(&dev_priv->irq_lock);
Ben Widawsky4912d042011-04-25 11:25:20 -0700927
Paulo Zanoni60611c12013-08-15 11:50:01 -0300928 /* Make sure we didn't queue anything we're not going to process. */
929 WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
930
Ben Widawsky48484052013-05-28 19:22:27 -0700931 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
Jesse Barnes3b8d8d92010-12-17 14:19:02 -0800932 return;
933
Jesse Barnes4fc688c2012-11-02 11:14:01 -0700934 mutex_lock(&dev_priv->rps.hw_lock);
Chris Wilson7b9e0ae2012-04-28 08:56:39 +0100935
Chris Wilsondd75fdc2013-09-25 17:34:57 +0100936 adj = dev_priv->rps.last_adj;
Ville Syrjälä74250342013-06-25 21:38:11 +0300937 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
Chris Wilsondd75fdc2013-09-25 17:34:57 +0100938 if (adj > 0)
939 adj *= 2;
940 else
941 adj = 1;
942 new_delay = dev_priv->rps.cur_delay + adj;
Ville Syrjälä74250342013-06-25 21:38:11 +0300943
944 /*
945 * For better performance, jump directly
946 * to RPe if we're below it.
947 */
Chris Wilsondd75fdc2013-09-25 17:34:57 +0100948 if (new_delay < dev_priv->rps.rpe_delay)
Ville Syrjälä74250342013-06-25 21:38:11 +0300949 new_delay = dev_priv->rps.rpe_delay;
Chris Wilsondd75fdc2013-09-25 17:34:57 +0100950 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
951 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
952 new_delay = dev_priv->rps.rpe_delay;
953 else
954 new_delay = dev_priv->rps.min_delay;
955 adj = 0;
956 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
957 if (adj < 0)
958 adj *= 2;
959 else
960 adj = -1;
961 new_delay = dev_priv->rps.cur_delay + adj;
962 } else { /* unknown event */
963 new_delay = dev_priv->rps.cur_delay;
964 }
Jesse Barnes3b8d8d92010-12-17 14:19:02 -0800965
Ben Widawsky79249632012-09-07 19:43:42 -0700966 /* sysfs frequency interfaces may have snuck in while servicing the
967 * interrupt
968 */
Chris Wilsondd75fdc2013-09-25 17:34:57 +0100969 if (new_delay < (int)dev_priv->rps.min_delay)
970 new_delay = dev_priv->rps.min_delay;
971 if (new_delay > (int)dev_priv->rps.max_delay)
972 new_delay = dev_priv->rps.max_delay;
973 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
974
975 if (IS_VALLEYVIEW(dev_priv->dev))
976 valleyview_set_rps(dev_priv->dev, new_delay);
977 else
978 gen6_set_rps(dev_priv->dev, new_delay);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -0800979
Jesse Barnes4fc688c2012-11-02 11:14:01 -0700980 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -0800981}
982
Ben Widawskye3689192012-05-25 16:56:22 -0700983
984/**
985 * ivybridge_parity_work - Workqueue called when a parity error interrupt
986 * occurred.
987 * @work: workqueue struct
988 *
989 * Doesn't actually do anything except notify userspace. As a consequence of
990 * this event, userspace should try to remap the bad rows since statistically
991 * it is likely the same row is more likely to go bad again.
992 */
993static void ivybridge_parity_work(struct work_struct *work)
994{
995 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
Daniel Vettera4da4fa2012-11-02 19:55:07 +0100996 l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -0700997 u32 error_status, row, bank, subbank;
Ben Widawsky35a85ac2013-09-19 11:13:41 -0700998 char *parity_event[6];
Ben Widawskye3689192012-05-25 16:56:22 -0700999 uint32_t misccpctl;
1000 unsigned long flags;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001001 uint8_t slice = 0;
Ben Widawskye3689192012-05-25 16:56:22 -07001002
1003 /* We must turn off DOP level clock gating to access the L3 registers.
1004 * In order to prevent a get/put style interface, acquire struct mutex
1005 * any time we access those registers.
1006 */
1007 mutex_lock(&dev_priv->dev->struct_mutex);
1008
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001009 /* If we've screwed up tracking, just let the interrupt fire again */
1010 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1011 goto out;
1012
Ben Widawskye3689192012-05-25 16:56:22 -07001013 misccpctl = I915_READ(GEN7_MISCCPCTL);
1014 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1015 POSTING_READ(GEN7_MISCCPCTL);
1016
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001017 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1018 u32 reg;
Ben Widawskye3689192012-05-25 16:56:22 -07001019
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001020 slice--;
1021 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1022 break;
1023
1024 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1025
1026 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1027
1028 error_status = I915_READ(reg);
1029 row = GEN7_PARITY_ERROR_ROW(error_status);
1030 bank = GEN7_PARITY_ERROR_BANK(error_status);
1031 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1032
1033 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1034 POSTING_READ(reg);
1035
1036 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1037 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1038 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1039 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1040 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1041 parity_event[5] = NULL;
1042
1043 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
1044 KOBJ_CHANGE, parity_event);
1045
1046 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1047 slice, row, bank, subbank);
1048
1049 kfree(parity_event[4]);
1050 kfree(parity_event[3]);
1051 kfree(parity_event[2]);
1052 kfree(parity_event[1]);
1053 }
Ben Widawskye3689192012-05-25 16:56:22 -07001054
1055 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1056
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001057out:
1058 WARN_ON(dev_priv->l3_parity.which_slice);
Ben Widawskye3689192012-05-25 16:56:22 -07001059 spin_lock_irqsave(&dev_priv->irq_lock, flags);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001060 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
Ben Widawskye3689192012-05-25 16:56:22 -07001061 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1062
1063 mutex_unlock(&dev_priv->dev->struct_mutex);
Ben Widawskye3689192012-05-25 16:56:22 -07001064}
1065
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001066static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
Ben Widawskye3689192012-05-25 16:56:22 -07001067{
1068 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Ben Widawskye3689192012-05-25 16:56:22 -07001069
Ben Widawsky040d2ba2013-09-19 11:01:40 -07001070 if (!HAS_L3_DPF(dev))
Ben Widawskye3689192012-05-25 16:56:22 -07001071 return;
1072
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001073 spin_lock(&dev_priv->irq_lock);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001074 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001075 spin_unlock(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001076
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001077 iir &= GT_PARITY_ERROR(dev);
1078 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1079 dev_priv->l3_parity.which_slice |= 1 << 1;
1080
1081 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1082 dev_priv->l3_parity.which_slice |= 1 << 0;
1083
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001084 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001085}
1086
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001087static void ilk_gt_irq_handler(struct drm_device *dev,
1088 struct drm_i915_private *dev_priv,
1089 u32 gt_iir)
1090{
1091 if (gt_iir &
1092 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1093 notify_ring(dev, &dev_priv->ring[RCS]);
1094 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1095 notify_ring(dev, &dev_priv->ring[VCS]);
1096}
1097
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001098static void snb_gt_irq_handler(struct drm_device *dev,
1099 struct drm_i915_private *dev_priv,
1100 u32 gt_iir)
1101{
1102
Ben Widawskycc609d52013-05-28 19:22:29 -07001103 if (gt_iir &
1104 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001105 notify_ring(dev, &dev_priv->ring[RCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001106 if (gt_iir & GT_BSD_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001107 notify_ring(dev, &dev_priv->ring[VCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001108 if (gt_iir & GT_BLT_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001109 notify_ring(dev, &dev_priv->ring[BCS]);
1110
Ben Widawskycc609d52013-05-28 19:22:29 -07001111 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1112 GT_BSD_CS_ERROR_INTERRUPT |
1113 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001114 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
1115 i915_handle_error(dev, false);
1116 }
Ben Widawskye3689192012-05-25 16:56:22 -07001117
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001118 if (gt_iir & GT_PARITY_ERROR(dev))
1119 ivybridge_parity_error_irq_handler(dev, gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001120}
1121
Egbert Eichb543fb02013-04-16 13:36:54 +02001122#define HPD_STORM_DETECT_PERIOD 1000
1123#define HPD_STORM_THRESHOLD 5
1124
Daniel Vetter10a504d2013-06-27 17:52:12 +02001125static inline void intel_hpd_irq_handler(struct drm_device *dev,
Daniel Vetter22062db2013-06-27 17:52:11 +02001126 u32 hotplug_trigger,
1127 const u32 *hpd)
Egbert Eichb543fb02013-04-16 13:36:54 +02001128{
1129 drm_i915_private_t *dev_priv = dev->dev_private;
Egbert Eichb543fb02013-04-16 13:36:54 +02001130 int i;
Daniel Vetter10a504d2013-06-27 17:52:12 +02001131 bool storm_detected = false;
Egbert Eichb543fb02013-04-16 13:36:54 +02001132
Daniel Vetter91d131d2013-06-27 17:52:14 +02001133 if (!hotplug_trigger)
1134 return;
1135
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001136 spin_lock(&dev_priv->irq_lock);
Egbert Eichb543fb02013-04-16 13:36:54 +02001137 for (i = 1; i < HPD_NUM_PINS; i++) {
Egbert Eich821450c2013-04-16 13:36:55 +02001138
Egbert Eichb8f102e2013-07-26 14:14:24 +02001139 WARN(((hpd[i] & hotplug_trigger) &&
1140 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
1141 "Received HPD interrupt although disabled\n");
1142
Egbert Eichb543fb02013-04-16 13:36:54 +02001143 if (!(hpd[i] & hotplug_trigger) ||
1144 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1145 continue;
1146
Jani Nikulabc5ead8c2013-05-07 15:10:29 +03001147 dev_priv->hpd_event_bits |= (1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001148 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1149 dev_priv->hpd_stats[i].hpd_last_jiffies
1150 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1151 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1152 dev_priv->hpd_stats[i].hpd_cnt = 0;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001153 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001154 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1155 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
Egbert Eich142e2392013-04-11 15:57:57 +02001156 dev_priv->hpd_event_bits &= ~(1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001157 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
Daniel Vetter10a504d2013-06-27 17:52:12 +02001158 storm_detected = true;
Egbert Eichb543fb02013-04-16 13:36:54 +02001159 } else {
1160 dev_priv->hpd_stats[i].hpd_cnt++;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001161 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1162 dev_priv->hpd_stats[i].hpd_cnt);
Egbert Eichb543fb02013-04-16 13:36:54 +02001163 }
1164 }
1165
Daniel Vetter10a504d2013-06-27 17:52:12 +02001166 if (storm_detected)
1167 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001168 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter5876fa02013-06-27 17:52:13 +02001169
Daniel Vetter645416f2013-09-02 16:22:25 +02001170 /*
1171 * Our hotplug handler can grab modeset locks (by calling down into the
1172 * fb helpers). Hence it must not be run on our own dev-priv->wq work
1173 * queue for otherwise the flush_work in the pageflip code will
1174 * deadlock.
1175 */
1176 schedule_work(&dev_priv->hotplug_work);
Egbert Eichb543fb02013-04-16 13:36:54 +02001177}
1178
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001179static void gmbus_irq_handler(struct drm_device *dev)
1180{
Daniel Vetter28c70f12012-12-01 13:53:45 +01001181 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1182
Daniel Vetter28c70f12012-12-01 13:53:45 +01001183 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001184}
1185
Daniel Vetterce99c252012-12-01 13:53:47 +01001186static void dp_aux_irq_handler(struct drm_device *dev)
1187{
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001188 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1189
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001190 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetterce99c252012-12-01 13:53:47 +01001191}
1192
Shuang He8bf1e9f2013-10-15 18:55:27 +01001193#if defined(CONFIG_DEBUG_FS)
Daniel Vetter277de952013-10-18 16:37:07 +02001194static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1195 uint32_t crc0, uint32_t crc1,
1196 uint32_t crc2, uint32_t crc3,
1197 uint32_t crc4)
Shuang He8bf1e9f2013-10-15 18:55:27 +01001198{
1199 struct drm_i915_private *dev_priv = dev->dev_private;
1200 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1201 struct intel_pipe_crc_entry *entry;
Damien Lespiauac2300d2013-10-15 18:55:30 +01001202 int head, tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001203
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001204 spin_lock(&pipe_crc->lock);
1205
Damien Lespiau0c912c72013-10-15 18:55:37 +01001206 if (!pipe_crc->entries) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001207 spin_unlock(&pipe_crc->lock);
Damien Lespiau0c912c72013-10-15 18:55:37 +01001208 DRM_ERROR("spurious interrupt\n");
1209 return;
1210 }
1211
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001212 head = pipe_crc->head;
1213 tail = pipe_crc->tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001214
1215 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001216 spin_unlock(&pipe_crc->lock);
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001217 DRM_ERROR("CRC buffer overflowing\n");
1218 return;
1219 }
1220
1221 entry = &pipe_crc->entries[head];
Shuang He8bf1e9f2013-10-15 18:55:27 +01001222
Daniel Vetter8bc5e952013-10-16 22:55:49 +02001223 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
Daniel Vettereba94eb2013-10-16 22:55:46 +02001224 entry->crc[0] = crc0;
1225 entry->crc[1] = crc1;
1226 entry->crc[2] = crc2;
1227 entry->crc[3] = crc3;
1228 entry->crc[4] = crc4;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001229
1230 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001231 pipe_crc->head = head;
1232
1233 spin_unlock(&pipe_crc->lock);
Damien Lespiau07144422013-10-15 18:55:40 +01001234
1235 wake_up_interruptible(&pipe_crc->wq);
Shuang He8bf1e9f2013-10-15 18:55:27 +01001236}
Daniel Vetter277de952013-10-18 16:37:07 +02001237#else
1238static inline void
1239display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1240 uint32_t crc0, uint32_t crc1,
1241 uint32_t crc2, uint32_t crc3,
1242 uint32_t crc4) {}
1243#endif
Daniel Vettereba94eb2013-10-16 22:55:46 +02001244
Daniel Vetter277de952013-10-18 16:37:07 +02001245
1246static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5a69b892013-10-16 22:55:52 +02001247{
1248 struct drm_i915_private *dev_priv = dev->dev_private;
1249
Daniel Vetter277de952013-10-18 16:37:07 +02001250 display_pipe_crc_irq_handler(dev, pipe,
1251 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1252 0, 0, 0, 0);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001253}
1254
Daniel Vetter277de952013-10-18 16:37:07 +02001255static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vettereba94eb2013-10-16 22:55:46 +02001256{
1257 struct drm_i915_private *dev_priv = dev->dev_private;
1258
Daniel Vetter277de952013-10-18 16:37:07 +02001259 display_pipe_crc_irq_handler(dev, pipe,
1260 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1261 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1262 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1263 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1264 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
Daniel Vettereba94eb2013-10-16 22:55:46 +02001265}
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001266
Daniel Vetter277de952013-10-18 16:37:07 +02001267static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001268{
1269 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001270 uint32_t res1, res2;
1271
1272 if (INTEL_INFO(dev)->gen >= 3)
1273 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1274 else
1275 res1 = 0;
1276
1277 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1278 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1279 else
1280 res2 = 0;
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001281
Daniel Vetter277de952013-10-18 16:37:07 +02001282 display_pipe_crc_irq_handler(dev, pipe,
1283 I915_READ(PIPE_CRC_RES_RED(pipe)),
1284 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1285 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1286 res1, res2);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001287}
Shuang He8bf1e9f2013-10-15 18:55:27 +01001288
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001289/* The RPS events need forcewake, so we add them to a work queue and mask their
1290 * IMR bits until the work is done. Other interrupts can be processed without
1291 * the work queue. */
1292static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
Ben Widawskybaf02a12013-05-28 19:22:24 -07001293{
Daniel Vetter41a05a32013-07-04 23:35:26 +02001294 if (pm_iir & GEN6_PM_RPS_EVENTS) {
Daniel Vetter59cdb632013-07-04 23:35:28 +02001295 spin_lock(&dev_priv->irq_lock);
Daniel Vetter41a05a32013-07-04 23:35:26 +02001296 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
Paulo Zanoni4d3b3d52013-08-09 17:04:36 -03001297 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
Daniel Vetter59cdb632013-07-04 23:35:28 +02001298 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter2adbee62013-07-04 23:35:27 +02001299
1300 queue_work(dev_priv->wq, &dev_priv->rps.work);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001301 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001302
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001303 if (HAS_VEBOX(dev_priv->dev)) {
1304 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1305 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
Ben Widawsky12638c52013-05-28 19:22:31 -07001306
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001307 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1308 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
1309 i915_handle_error(dev_priv->dev, false);
1310 }
Ben Widawsky12638c52013-05-28 19:22:31 -07001311 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001312}
1313
Daniel Vetterff1f5252012-10-02 15:10:55 +02001314static irqreturn_t valleyview_irq_handler(int irq, void *arg)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001315{
1316 struct drm_device *dev = (struct drm_device *) arg;
1317 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1318 u32 iir, gt_iir, pm_iir;
1319 irqreturn_t ret = IRQ_NONE;
1320 unsigned long irqflags;
1321 int pipe;
1322 u32 pipe_stats[I915_MAX_PIPES];
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001323
1324 atomic_inc(&dev_priv->irq_received);
1325
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001326 while (true) {
1327 iir = I915_READ(VLV_IIR);
1328 gt_iir = I915_READ(GTIIR);
1329 pm_iir = I915_READ(GEN6_PMIIR);
1330
1331 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1332 goto out;
1333
1334 ret = IRQ_HANDLED;
1335
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001336 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001337
1338 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1339 for_each_pipe(pipe) {
1340 int reg = PIPESTAT(pipe);
1341 pipe_stats[pipe] = I915_READ(reg);
1342
1343 /*
1344 * Clear the PIPE*STAT regs before the IIR
1345 */
1346 if (pipe_stats[pipe] & 0x8000ffff) {
1347 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1348 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1349 pipe_name(pipe));
1350 I915_WRITE(reg, pipe_stats[pipe]);
1351 }
1352 }
1353 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1354
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001355 for_each_pipe(pipe) {
1356 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1357 drm_handle_vblank(dev, pipe);
1358
1359 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1360 intel_prepare_page_flip(dev, pipe);
1361 intel_finish_page_flip(dev, pipe);
1362 }
Daniel Vetter4356d582013-10-16 22:55:55 +02001363
1364 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02001365 i9xx_pipe_crc_irq_handler(dev, pipe);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001366 }
1367
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001368 /* Consume port. Then clear IIR or we'll miss events */
1369 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1370 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02001371 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001372
1373 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1374 hotplug_status);
Daniel Vetter91d131d2013-06-27 17:52:14 +02001375
1376 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1377
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001378 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1379 I915_READ(PORT_HOTPLUG_STAT);
1380 }
1381
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001382 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1383 gmbus_irq_handler(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001384
Paulo Zanoni60611c12013-08-15 11:50:01 -03001385 if (pm_iir)
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001386 gen6_rps_irq_handler(dev_priv, pm_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001387
1388 I915_WRITE(GTIIR, gt_iir);
1389 I915_WRITE(GEN6_PMIIR, pm_iir);
1390 I915_WRITE(VLV_IIR, iir);
1391 }
1392
1393out:
1394 return ret;
1395}
1396
Adam Jackson23e81d62012-06-06 15:45:44 -04001397static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08001398{
1399 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001400 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001401 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Jesse Barnes776ad802011-01-04 15:09:39 -08001402
Daniel Vetter91d131d2013-06-27 17:52:14 +02001403 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1404
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001405 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1406 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1407 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08001408 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001409 port_name(port));
1410 }
Jesse Barnes776ad802011-01-04 15:09:39 -08001411
Daniel Vetterce99c252012-12-01 13:53:47 +01001412 if (pch_iir & SDE_AUX_MASK)
1413 dp_aux_irq_handler(dev);
1414
Jesse Barnes776ad802011-01-04 15:09:39 -08001415 if (pch_iir & SDE_GMBUS)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001416 gmbus_irq_handler(dev);
Jesse Barnes776ad802011-01-04 15:09:39 -08001417
1418 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1419 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1420
1421 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1422 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1423
1424 if (pch_iir & SDE_POISON)
1425 DRM_ERROR("PCH poison interrupt\n");
1426
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001427 if (pch_iir & SDE_FDI_MASK)
1428 for_each_pipe(pipe)
1429 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1430 pipe_name(pipe),
1431 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08001432
1433 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1434 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1435
1436 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1437 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1438
Jesse Barnes776ad802011-01-04 15:09:39 -08001439 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Paulo Zanoni86642812013-04-12 17:57:57 -03001440 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1441 false))
1442 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1443
1444 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1445 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1446 false))
1447 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1448}
1449
1450static void ivb_err_int_handler(struct drm_device *dev)
1451{
1452 struct drm_i915_private *dev_priv = dev->dev_private;
1453 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001454 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03001455
Paulo Zanonide032bf2013-04-12 17:57:58 -03001456 if (err_int & ERR_INT_POISON)
1457 DRM_ERROR("Poison interrupt\n");
1458
Daniel Vetter5a69b892013-10-16 22:55:52 +02001459 for_each_pipe(pipe) {
1460 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1461 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1462 false))
1463 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1464 pipe_name(pipe));
1465 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001466
Daniel Vetter5a69b892013-10-16 22:55:52 +02001467 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1468 if (IS_IVYBRIDGE(dev))
Daniel Vetter277de952013-10-18 16:37:07 +02001469 ivb_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001470 else
Daniel Vetter277de952013-10-18 16:37:07 +02001471 hsw_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001472 }
1473 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01001474
Paulo Zanoni86642812013-04-12 17:57:57 -03001475 I915_WRITE(GEN7_ERR_INT, err_int);
1476}
1477
1478static void cpt_serr_int_handler(struct drm_device *dev)
1479{
1480 struct drm_i915_private *dev_priv = dev->dev_private;
1481 u32 serr_int = I915_READ(SERR_INT);
1482
Paulo Zanonide032bf2013-04-12 17:57:58 -03001483 if (serr_int & SERR_INT_POISON)
1484 DRM_ERROR("PCH poison interrupt\n");
1485
Paulo Zanoni86642812013-04-12 17:57:57 -03001486 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1487 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1488 false))
1489 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1490
1491 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1492 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1493 false))
1494 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1495
1496 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1497 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1498 false))
1499 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1500
1501 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08001502}
1503
Adam Jackson23e81d62012-06-06 15:45:44 -04001504static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1505{
1506 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1507 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001508 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04001509
Daniel Vetter91d131d2013-06-27 17:52:14 +02001510 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1511
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001512 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1513 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1514 SDE_AUDIO_POWER_SHIFT_CPT);
1515 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1516 port_name(port));
1517 }
Adam Jackson23e81d62012-06-06 15:45:44 -04001518
1519 if (pch_iir & SDE_AUX_MASK_CPT)
Daniel Vetterce99c252012-12-01 13:53:47 +01001520 dp_aux_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001521
1522 if (pch_iir & SDE_GMBUS_CPT)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001523 gmbus_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001524
1525 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1526 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1527
1528 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1529 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1530
1531 if (pch_iir & SDE_FDI_MASK_CPT)
1532 for_each_pipe(pipe)
1533 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1534 pipe_name(pipe),
1535 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03001536
1537 if (pch_iir & SDE_ERROR_CPT)
1538 cpt_serr_int_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001539}
1540
Paulo Zanonic008bc62013-07-12 16:35:10 -03001541static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1542{
1543 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter40da17c2013-10-21 18:04:36 +02001544 enum pipe pipe;
Paulo Zanonic008bc62013-07-12 16:35:10 -03001545
1546 if (de_iir & DE_AUX_CHANNEL_A)
1547 dp_aux_irq_handler(dev);
1548
1549 if (de_iir & DE_GSE)
1550 intel_opregion_asle_intr(dev);
1551
Paulo Zanonic008bc62013-07-12 16:35:10 -03001552 if (de_iir & DE_POISON)
1553 DRM_ERROR("Poison interrupt\n");
1554
Daniel Vetter40da17c2013-10-21 18:04:36 +02001555 for_each_pipe(pipe) {
1556 if (de_iir & DE_PIPE_VBLANK(pipe))
1557 drm_handle_vblank(dev, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03001558
Daniel Vetter40da17c2013-10-21 18:04:36 +02001559 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1560 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1561 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1562 pipe_name(pipe));
Paulo Zanonic008bc62013-07-12 16:35:10 -03001563
Daniel Vetter40da17c2013-10-21 18:04:36 +02001564 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1565 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001566
Daniel Vetter40da17c2013-10-21 18:04:36 +02001567 /* plane/pipes map 1:1 on ilk+ */
1568 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1569 intel_prepare_page_flip(dev, pipe);
1570 intel_finish_page_flip_plane(dev, pipe);
1571 }
Paulo Zanonic008bc62013-07-12 16:35:10 -03001572 }
1573
1574 /* check event from PCH */
1575 if (de_iir & DE_PCH_EVENT) {
1576 u32 pch_iir = I915_READ(SDEIIR);
1577
1578 if (HAS_PCH_CPT(dev))
1579 cpt_irq_handler(dev, pch_iir);
1580 else
1581 ibx_irq_handler(dev, pch_iir);
1582
1583 /* should clear PCH hotplug event before clear CPU irq */
1584 I915_WRITE(SDEIIR, pch_iir);
1585 }
1586
1587 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1588 ironlake_rps_change_irq_handler(dev);
1589}
1590
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001591static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1592{
1593 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02001594 enum pipe i;
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001595
1596 if (de_iir & DE_ERR_INT_IVB)
1597 ivb_err_int_handler(dev);
1598
1599 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1600 dp_aux_irq_handler(dev);
1601
1602 if (de_iir & DE_GSE_IVB)
1603 intel_opregion_asle_intr(dev);
1604
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02001605 for_each_pipe(i) {
Daniel Vetter40da17c2013-10-21 18:04:36 +02001606 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001607 drm_handle_vblank(dev, i);
Daniel Vetter40da17c2013-10-21 18:04:36 +02001608
1609 /* plane/pipes map 1:1 on ilk+ */
1610 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001611 intel_prepare_page_flip(dev, i);
1612 intel_finish_page_flip_plane(dev, i);
1613 }
1614 }
1615
1616 /* check event from PCH */
1617 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1618 u32 pch_iir = I915_READ(SDEIIR);
1619
1620 cpt_irq_handler(dev, pch_iir);
1621
1622 /* clear PCH hotplug event before clear CPU irq */
1623 I915_WRITE(SDEIIR, pch_iir);
1624 }
1625}
1626
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001627static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001628{
1629 struct drm_device *dev = (struct drm_device *) arg;
1630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001631 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01001632 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001633
1634 atomic_inc(&dev_priv->irq_received);
1635
Paulo Zanoni86642812013-04-12 17:57:57 -03001636 /* We get interrupts on unclaimed registers, so check for this before we
1637 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01001638 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03001639
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001640 /* disable master interrupt before clearing iir */
1641 de_ier = I915_READ(DEIER);
1642 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03001643 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01001644
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001645 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1646 * interrupts will will be stored on its back queue, and then we'll be
1647 * able to process them after we restore SDEIER (as soon as we restore
1648 * it, we'll get an interrupt if SDEIIR still has something to process
1649 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07001650 if (!HAS_PCH_NOP(dev)) {
1651 sde_ier = I915_READ(SDEIER);
1652 I915_WRITE(SDEIER, 0);
1653 POSTING_READ(SDEIER);
1654 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001655
Chris Wilson0e434062012-05-09 21:45:44 +01001656 gt_iir = I915_READ(GTIIR);
1657 if (gt_iir) {
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001658 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001659 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001660 else
1661 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001662 I915_WRITE(GTIIR, gt_iir);
1663 ret = IRQ_HANDLED;
1664 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001665
1666 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01001667 if (de_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001668 if (INTEL_INFO(dev)->gen >= 7)
1669 ivb_display_irq_handler(dev, de_iir);
1670 else
1671 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001672 I915_WRITE(DEIIR, de_iir);
1673 ret = IRQ_HANDLED;
1674 }
1675
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001676 if (INTEL_INFO(dev)->gen >= 6) {
1677 u32 pm_iir = I915_READ(GEN6_PMIIR);
1678 if (pm_iir) {
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001679 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001680 I915_WRITE(GEN6_PMIIR, pm_iir);
1681 ret = IRQ_HANDLED;
1682 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001683 }
1684
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001685 I915_WRITE(DEIER, de_ier);
1686 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07001687 if (!HAS_PCH_NOP(dev)) {
1688 I915_WRITE(SDEIER, sde_ier);
1689 POSTING_READ(SDEIER);
1690 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001691
1692 return ret;
1693}
1694
Daniel Vetter17e1df02013-09-08 21:57:13 +02001695static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1696 bool reset_completed)
1697{
1698 struct intel_ring_buffer *ring;
1699 int i;
1700
1701 /*
1702 * Notify all waiters for GPU completion events that reset state has
1703 * been changed, and that they need to restart their wait after
1704 * checking for potential errors (and bail out to drop locks if there is
1705 * a gpu reset pending so that i915_error_work_func can acquire them).
1706 */
1707
1708 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1709 for_each_ring(ring, dev_priv, i)
1710 wake_up_all(&ring->irq_queue);
1711
1712 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1713 wake_up_all(&dev_priv->pending_flip_queue);
1714
1715 /*
1716 * Signal tasks blocked in i915_gem_wait_for_error that the pending
1717 * reset state is cleared.
1718 */
1719 if (reset_completed)
1720 wake_up_all(&dev_priv->gpu_error.reset_queue);
1721}
1722
Jesse Barnes8a905232009-07-11 16:48:03 -04001723/**
1724 * i915_error_work_func - do process context error handling work
1725 * @work: work struct
1726 *
1727 * Fire an error uevent so userspace can see that a hang or error
1728 * was detected.
1729 */
1730static void i915_error_work_func(struct work_struct *work)
1731{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001732 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1733 work);
1734 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1735 gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04001736 struct drm_device *dev = dev_priv->dev;
Ben Widawskycce723e2013-07-19 09:16:42 -07001737 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1738 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1739 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetter17e1df02013-09-08 21:57:13 +02001740 int ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04001741
Ben Gamarif316a422009-09-14 17:48:46 -04001742 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04001743
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001744 /*
1745 * Note that there's only one work item which does gpu resets, so we
1746 * need not worry about concurrent gpu resets potentially incrementing
1747 * error->reset_counter twice. We only need to take care of another
1748 * racing irq/hangcheck declaring the gpu dead for a second time. A
1749 * quick check for that is good enough: schedule_work ensures the
1750 * correct ordering between hang detection and this work item, and since
1751 * the reset in-progress bit is only ever set by code outside of this
1752 * work we don't need to worry about any other races.
1753 */
1754 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01001755 DRM_DEBUG_DRIVER("resetting chip\n");
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001756 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1757 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001758
Daniel Vetter17e1df02013-09-08 21:57:13 +02001759 /*
1760 * All state reset _must_ be completed before we update the
1761 * reset counter, for otherwise waiters might miss the reset
1762 * pending state and not properly drop locks, resulting in
1763 * deadlocks with the reset work.
1764 */
Daniel Vetterf69061b2012-12-06 09:01:42 +01001765 ret = i915_reset(dev);
1766
Daniel Vetter17e1df02013-09-08 21:57:13 +02001767 intel_display_handle_reset(dev);
1768
Daniel Vetterf69061b2012-12-06 09:01:42 +01001769 if (ret == 0) {
1770 /*
1771 * After all the gem state is reset, increment the reset
1772 * counter and wake up everyone waiting for the reset to
1773 * complete.
1774 *
1775 * Since unlock operations are a one-sided barrier only,
1776 * we need to insert a barrier here to order any seqno
1777 * updates before
1778 * the counter increment.
1779 */
1780 smp_mb__before_atomic_inc();
1781 atomic_inc(&dev_priv->gpu_error.reset_counter);
1782
1783 kobject_uevent_env(&dev->primary->kdev.kobj,
1784 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001785 } else {
1786 atomic_set(&error->reset_counter, I915_WEDGED);
Ben Gamarif316a422009-09-14 17:48:46 -04001787 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001788
Daniel Vetter17e1df02013-09-08 21:57:13 +02001789 /*
1790 * Note: The wake_up also serves as a memory barrier so that
1791 * waiters see the update value of the reset counter atomic_t.
1792 */
1793 i915_error_wake_up(dev_priv, true);
Ben Gamarif316a422009-09-14 17:48:46 -04001794 }
Jesse Barnes8a905232009-07-11 16:48:03 -04001795}
1796
Chris Wilson35aed2e2010-05-27 13:18:12 +01001797static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04001798{
1799 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07001800 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04001801 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07001802 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04001803
Chris Wilson35aed2e2010-05-27 13:18:12 +01001804 if (!eir)
1805 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04001806
Joe Perchesa70491c2012-03-18 13:00:11 -07001807 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04001808
Ben Widawskybd9854f2012-08-23 15:18:09 -07001809 i915_get_extra_instdone(dev, instdone);
1810
Jesse Barnes8a905232009-07-11 16:48:03 -04001811 if (IS_G4X(dev)) {
1812 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1813 u32 ipeir = I915_READ(IPEIR_I965);
1814
Joe Perchesa70491c2012-03-18 13:00:11 -07001815 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1816 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07001817 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1818 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07001819 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07001820 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04001821 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001822 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04001823 }
1824 if (eir & GM45_ERROR_PAGE_TABLE) {
1825 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07001826 pr_err("page table error\n");
1827 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04001828 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001829 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04001830 }
1831 }
1832
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001833 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04001834 if (eir & I915_ERROR_PAGE_TABLE) {
1835 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07001836 pr_err("page table error\n");
1837 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04001838 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001839 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04001840 }
1841 }
1842
1843 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07001844 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001845 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07001846 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001847 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04001848 /* pipestat has already been acked */
1849 }
1850 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07001851 pr_err("instruction error\n");
1852 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07001853 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1854 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001855 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04001856 u32 ipeir = I915_READ(IPEIR);
1857
Joe Perchesa70491c2012-03-18 13:00:11 -07001858 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1859 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07001860 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04001861 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001862 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04001863 } else {
1864 u32 ipeir = I915_READ(IPEIR_I965);
1865
Joe Perchesa70491c2012-03-18 13:00:11 -07001866 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1867 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07001868 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07001869 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04001870 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001871 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04001872 }
1873 }
1874
1875 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001876 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04001877 eir = I915_READ(EIR);
1878 if (eir) {
1879 /*
1880 * some errors might have become stuck,
1881 * mask them.
1882 */
1883 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1884 I915_WRITE(EMR, I915_READ(EMR) | eir);
1885 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1886 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01001887}
1888
1889/**
1890 * i915_handle_error - handle an error interrupt
1891 * @dev: drm device
1892 *
1893 * Do some basic checking of regsiter state at error interrupt time and
1894 * dump it to the syslog. Also call i915_capture_error_state() to make
1895 * sure we get a record and make it available in debugfs. Fire a uevent
1896 * so userspace knows something bad happened (should trigger collection
1897 * of a ring dump etc.).
1898 */
Chris Wilson527f9e92010-11-11 01:16:58 +00001899void i915_handle_error(struct drm_device *dev, bool wedged)
Chris Wilson35aed2e2010-05-27 13:18:12 +01001900{
1901 struct drm_i915_private *dev_priv = dev->dev_private;
1902
1903 i915_capture_error_state(dev);
1904 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04001905
Ben Gamariba1234d2009-09-14 17:48:47 -04001906 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01001907 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1908 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04001909
Ben Gamari11ed50e2009-09-14 17:48:45 -04001910 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02001911 * Wakeup waiting processes so that the reset work function
1912 * i915_error_work_func doesn't deadlock trying to grab various
1913 * locks. By bumping the reset counter first, the woken
1914 * processes will see a reset in progress and back off,
1915 * releasing their locks and then wait for the reset completion.
1916 * We must do this for _all_ gpu waiters that might hold locks
1917 * that the reset work needs to acquire.
1918 *
1919 * Note: The wake_up serves as the required memory barrier to
1920 * ensure that the waiters see the updated value of the reset
1921 * counter atomic_t.
Ben Gamari11ed50e2009-09-14 17:48:45 -04001922 */
Daniel Vetter17e1df02013-09-08 21:57:13 +02001923 i915_error_wake_up(dev_priv, false);
Ben Gamari11ed50e2009-09-14 17:48:45 -04001924 }
1925
Daniel Vetter122f46b2013-09-04 17:36:14 +02001926 /*
1927 * Our reset work can grab modeset locks (since it needs to reset the
1928 * state of outstanding pagelips). Hence it must not be run on our own
1929 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
1930 * code will deadlock.
1931 */
1932 schedule_work(&dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04001933}
1934
Ville Syrjälä21ad8332013-02-19 15:16:39 +02001935static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001936{
1937 drm_i915_private_t *dev_priv = dev->dev_private;
1938 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1939 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00001940 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001941 struct intel_unpin_work *work;
1942 unsigned long flags;
1943 bool stall_detected;
1944
1945 /* Ignore early vblank irqs */
1946 if (intel_crtc == NULL)
1947 return;
1948
1949 spin_lock_irqsave(&dev->event_lock, flags);
1950 work = intel_crtc->unpin_work;
1951
Chris Wilsone7d841c2012-12-03 11:36:30 +00001952 if (work == NULL ||
1953 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1954 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001955 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1956 spin_unlock_irqrestore(&dev->event_lock, flags);
1957 return;
1958 }
1959
1960 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00001961 obj = work->pending_flip_obj;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001962 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001963 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07001964 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001965 i915_gem_obj_ggtt_offset(obj);
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001966 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001967 int dspaddr = DSPADDR(intel_crtc->plane);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001968 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
Ville Syrjälä01f2c772011-12-20 00:06:49 +02001969 crtc->y * crtc->fb->pitches[0] +
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001970 crtc->x * crtc->fb->bits_per_pixel/8);
1971 }
1972
1973 spin_unlock_irqrestore(&dev->event_lock, flags);
1974
1975 if (stall_detected) {
1976 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1977 intel_prepare_page_flip(dev, intel_crtc->plane);
1978 }
1979}
1980
Keith Packard42f52ef2008-10-18 19:39:29 -07001981/* Called from drm generic code, passed 'crtc' which
1982 * we use as a pipe index
1983 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001984static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001985{
1986 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07001987 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08001988
Chris Wilson5eddb702010-09-11 13:48:45 +01001989 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08001990 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001991
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001992 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07001993 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08001994 i915_enable_pipestat(dev_priv, pipe,
1995 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Keith Packarde9d21d72008-10-16 11:31:38 -07001996 else
Keith Packard7c463582008-11-04 02:03:27 -08001997 i915_enable_pipestat(dev_priv, pipe,
1998 PIPE_VBLANK_INTERRUPT_ENABLE);
Chris Wilson8692d00e2011-02-05 10:08:21 +00001999
2000 /* maintain vblank delivery even in deep C-states */
2001 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002002 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002003 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002004
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002005 return 0;
2006}
2007
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002008static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002009{
2010 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2011 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002012 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002013 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002014
2015 if (!i915_pipe_enabled(dev, pipe))
2016 return -EINVAL;
2017
2018 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002019 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002020 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2021
2022 return 0;
2023}
2024
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002025static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2026{
2027 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2028 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002029 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002030
2031 if (!i915_pipe_enabled(dev, pipe))
2032 return -EINVAL;
2033
2034 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002035 imr = I915_READ(VLV_IMR);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002036 if (pipe == PIPE_A)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002037 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002038 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002039 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002040 I915_WRITE(VLV_IMR, imr);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002041 i915_enable_pipestat(dev_priv, pipe,
2042 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002043 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2044
2045 return 0;
2046}
2047
Keith Packard42f52ef2008-10-18 19:39:29 -07002048/* Called from drm generic code, passed 'crtc' which
2049 * we use as a pipe index
2050 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002051static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002052{
2053 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002054 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002055
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002056 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002057 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002058 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00002059
Jesse Barnesf796cf82011-04-07 13:58:17 -07002060 i915_disable_pipestat(dev_priv, pipe,
2061 PIPE_VBLANK_INTERRUPT_ENABLE |
2062 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2063 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2064}
2065
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002066static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002067{
2068 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2069 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002070 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002071 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002072
2073 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002074 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002075 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2076}
2077
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002078static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2079{
2080 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2081 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002082 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002083
2084 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002085 i915_disable_pipestat(dev_priv, pipe,
2086 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002087 imr = I915_READ(VLV_IMR);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002088 if (pipe == PIPE_A)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002089 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002090 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002091 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002092 I915_WRITE(VLV_IMR, imr);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002093 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2094}
2095
Chris Wilson893eead2010-10-27 14:44:35 +01002096static u32
2097ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08002098{
Chris Wilson893eead2010-10-27 14:44:35 +01002099 return list_entry(ring->request_list.prev,
2100 struct drm_i915_gem_request, list)->seqno;
2101}
2102
Chris Wilson9107e9d2013-06-10 11:20:20 +01002103static bool
2104ring_idle(struct intel_ring_buffer *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002105{
Chris Wilson9107e9d2013-06-10 11:20:20 +01002106 return (list_empty(&ring->request_list) ||
2107 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04002108}
2109
Chris Wilson6274f212013-06-10 11:20:21 +01002110static struct intel_ring_buffer *
2111semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002112{
2113 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilson6274f212013-06-10 11:20:21 +01002114 u32 cmd, ipehr, acthd, acthd_min;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002115
2116 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2117 if ((ipehr & ~(0x3 << 16)) !=
2118 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
Chris Wilson6274f212013-06-10 11:20:21 +01002119 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002120
2121 /* ACTHD is likely pointing to the dword after the actual command,
2122 * so scan backwards until we find the MBOX.
2123 */
Chris Wilson6274f212013-06-10 11:20:21 +01002124 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002125 acthd_min = max((int)acthd - 3 * 4, 0);
2126 do {
2127 cmd = ioread32(ring->virtual_start + acthd);
2128 if (cmd == ipehr)
2129 break;
2130
2131 acthd -= 4;
2132 if (acthd < acthd_min)
Chris Wilson6274f212013-06-10 11:20:21 +01002133 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002134 } while (1);
2135
Chris Wilson6274f212013-06-10 11:20:21 +01002136 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
2137 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
Chris Wilsona24a11e2013-03-14 17:52:05 +02002138}
2139
Chris Wilson6274f212013-06-10 11:20:21 +01002140static int semaphore_passed(struct intel_ring_buffer *ring)
2141{
2142 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2143 struct intel_ring_buffer *signaller;
2144 u32 seqno, ctl;
2145
2146 ring->hangcheck.deadlock = true;
2147
2148 signaller = semaphore_waits_for(ring, &seqno);
2149 if (signaller == NULL || signaller->hangcheck.deadlock)
2150 return -1;
2151
2152 /* cursory check for an unkickable deadlock */
2153 ctl = I915_READ_CTL(signaller);
2154 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2155 return -1;
2156
2157 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2158}
2159
2160static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2161{
2162 struct intel_ring_buffer *ring;
2163 int i;
2164
2165 for_each_ring(ring, dev_priv, i)
2166 ring->hangcheck.deadlock = false;
2167}
2168
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002169static enum intel_ring_hangcheck_action
2170ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002171{
2172 struct drm_device *dev = ring->dev;
2173 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002174 u32 tmp;
2175
Chris Wilson6274f212013-06-10 11:20:21 +01002176 if (ring->hangcheck.acthd != acthd)
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002177 return HANGCHECK_ACTIVE;
Chris Wilson6274f212013-06-10 11:20:21 +01002178
Chris Wilson9107e9d2013-06-10 11:20:20 +01002179 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002180 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002181
2182 /* Is the chip hanging on a WAIT_FOR_EVENT?
2183 * If so we can simply poke the RB_WAIT bit
2184 * and break the hang. This should work on
2185 * all but the second generation chipsets.
2186 */
2187 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002188 if (tmp & RING_WAIT) {
2189 DRM_ERROR("Kicking stuck wait on %s\n",
2190 ring->name);
Chris Wilson09e14bf2013-10-10 09:37:19 +01002191 i915_handle_error(dev, false);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002192 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002193 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002194 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002195
Chris Wilson6274f212013-06-10 11:20:21 +01002196 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2197 switch (semaphore_passed(ring)) {
2198 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002199 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002200 case 1:
2201 DRM_ERROR("Kicking stuck semaphore on %s\n",
2202 ring->name);
Chris Wilson09e14bf2013-10-10 09:37:19 +01002203 i915_handle_error(dev, false);
Chris Wilson6274f212013-06-10 11:20:21 +01002204 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002205 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002206 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002207 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01002208 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002209 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002210
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002211 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002212}
2213
Ben Gamarif65d9422009-09-14 17:48:44 -04002214/**
2215 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002216 * batchbuffers in a long time. We keep track per ring seqno progress and
2217 * if there are no progress, hangcheck score for that ring is increased.
2218 * Further, acthd is inspected to see if the ring is stuck. On stuck case
2219 * we kick the ring. If we see no progress on three subsequent calls
2220 * we assume chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04002221 */
Damien Lespiaua658b5d2013-08-08 22:28:56 +01002222static void i915_hangcheck_elapsed(unsigned long data)
Ben Gamarif65d9422009-09-14 17:48:44 -04002223{
2224 struct drm_device *dev = (struct drm_device *)data;
2225 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002226 struct intel_ring_buffer *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01002227 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002228 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002229 bool stuck[I915_NUM_RINGS] = { 0 };
2230#define BUSY 1
2231#define KICK 5
2232#define HUNG 20
2233#define FIRE 30
Chris Wilson893eead2010-10-27 14:44:35 +01002234
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002235 if (!i915_enable_hangcheck)
2236 return;
2237
Chris Wilsonb4519512012-05-11 14:29:30 +01002238 for_each_ring(ring, dev_priv, i) {
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002239 u32 seqno, acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002240 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002241
Chris Wilson6274f212013-06-10 11:20:21 +01002242 semaphore_clear_deadlocks(dev_priv);
2243
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002244 seqno = ring->get_seqno(ring, false);
2245 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002246
Chris Wilson9107e9d2013-06-10 11:20:20 +01002247 if (ring->hangcheck.seqno == seqno) {
2248 if (ring_idle(ring, seqno)) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002249 ring->hangcheck.action = HANGCHECK_IDLE;
2250
Chris Wilson9107e9d2013-06-10 11:20:20 +01002251 if (waitqueue_active(&ring->irq_queue)) {
2252 /* Issue a wake-up to catch stuck h/w. */
Chris Wilson094f9a52013-09-25 17:34:55 +01002253 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
Daniel Vetterf4adcd22013-10-28 09:24:13 +01002254 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2255 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2256 ring->name);
2257 else
2258 DRM_INFO("Fake missed irq on %s\n",
2259 ring->name);
Chris Wilson094f9a52013-09-25 17:34:55 +01002260 wake_up_all(&ring->irq_queue);
2261 }
2262 /* Safeguard against driver failure */
2263 ring->hangcheck.score += BUSY;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002264 } else
2265 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002266 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01002267 /* We always increment the hangcheck score
2268 * if the ring is busy and still processing
2269 * the same request, so that no single request
2270 * can run indefinitely (such as a chain of
2271 * batches). The only time we do not increment
2272 * the hangcheck score on this ring, if this
2273 * ring is in a legitimate wait for another
2274 * ring. In that case the waiting ring is a
2275 * victim and we want to be sure we catch the
2276 * right culprit. Then every time we do kick
2277 * the ring, add a small increment to the
2278 * score so that we can catch a batch that is
2279 * being repeatedly kicked and so responsible
2280 * for stalling the machine.
2281 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002282 ring->hangcheck.action = ring_stuck(ring,
2283 acthd);
2284
2285 switch (ring->hangcheck.action) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002286 case HANGCHECK_IDLE:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002287 case HANGCHECK_WAIT:
Chris Wilson6274f212013-06-10 11:20:21 +01002288 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002289 case HANGCHECK_ACTIVE:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002290 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01002291 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002292 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002293 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002294 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002295 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002296 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002297 stuck[i] = true;
2298 break;
2299 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002300 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002301 } else {
Mika Kuoppalada661462013-09-06 16:03:28 +03002302 ring->hangcheck.action = HANGCHECK_ACTIVE;
2303
Chris Wilson9107e9d2013-06-10 11:20:20 +01002304 /* Gradually reduce the count so that we catch DoS
2305 * attempts across multiple batches.
2306 */
2307 if (ring->hangcheck.score > 0)
2308 ring->hangcheck.score--;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002309 }
2310
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002311 ring->hangcheck.seqno = seqno;
2312 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002313 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002314 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002315
Mika Kuoppala92cab732013-05-24 17:16:07 +03002316 for_each_ring(ring, dev_priv, i) {
Chris Wilson9107e9d2013-06-10 11:20:20 +01002317 if (ring->hangcheck.score > FIRE) {
Daniel Vetterb8d88d12013-08-28 10:57:59 +02002318 DRM_INFO("%s on %s\n",
2319 stuck[i] ? "stuck" : "no progress",
2320 ring->name);
Chris Wilsona43adf02013-06-10 11:20:22 +01002321 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002322 }
2323 }
2324
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002325 if (rings_hung)
2326 return i915_handle_error(dev, true);
Ben Gamarif65d9422009-09-14 17:48:44 -04002327
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002328 if (busy_count)
2329 /* Reset timer case chip hangs without another request
2330 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002331 i915_queue_hangcheck(dev);
2332}
2333
2334void i915_queue_hangcheck(struct drm_device *dev)
2335{
2336 struct drm_i915_private *dev_priv = dev->dev_private;
2337 if (!i915_enable_hangcheck)
2338 return;
2339
2340 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2341 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002342}
2343
Paulo Zanoni91738a92013-06-05 14:21:51 -03002344static void ibx_irq_preinstall(struct drm_device *dev)
2345{
2346 struct drm_i915_private *dev_priv = dev->dev_private;
2347
2348 if (HAS_PCH_NOP(dev))
2349 return;
2350
2351 /* south display irq */
2352 I915_WRITE(SDEIMR, 0xffffffff);
2353 /*
2354 * SDEIER is also touched by the interrupt handler to work around missed
2355 * PCH interrupts. Hence we can't update it after the interrupt handler
2356 * is enabled - instead we unconditionally enable all PCH interrupt
2357 * sources here, but then only unmask them as needed with SDEIMR.
2358 */
2359 I915_WRITE(SDEIER, 0xffffffff);
2360 POSTING_READ(SDEIER);
2361}
2362
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002363static void gen5_gt_irq_preinstall(struct drm_device *dev)
2364{
2365 struct drm_i915_private *dev_priv = dev->dev_private;
2366
2367 /* and GT */
2368 I915_WRITE(GTIMR, 0xffffffff);
2369 I915_WRITE(GTIER, 0x0);
2370 POSTING_READ(GTIER);
2371
2372 if (INTEL_INFO(dev)->gen >= 6) {
2373 /* and PM */
2374 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2375 I915_WRITE(GEN6_PMIER, 0x0);
2376 POSTING_READ(GEN6_PMIER);
2377 }
2378}
2379
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380/* drm_dma.h hooks
2381*/
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002382static void ironlake_irq_preinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002383{
2384 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2385
Jesse Barnes46979952011-04-07 13:53:55 -07002386 atomic_set(&dev_priv->irq_received, 0);
2387
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002388 I915_WRITE(HWSTAM, 0xeffe);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002389
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002390 I915_WRITE(DEIMR, 0xffffffff);
2391 I915_WRITE(DEIER, 0x0);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002392 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002393
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002394 gen5_gt_irq_preinstall(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002395
Paulo Zanoni91738a92013-06-05 14:21:51 -03002396 ibx_irq_preinstall(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002397}
2398
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002399static void valleyview_irq_preinstall(struct drm_device *dev)
2400{
2401 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2402 int pipe;
2403
2404 atomic_set(&dev_priv->irq_received, 0);
2405
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002406 /* VLV magic */
2407 I915_WRITE(VLV_IMR, 0);
2408 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2409 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2410 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2411
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002412 /* and GT */
2413 I915_WRITE(GTIIR, I915_READ(GTIIR));
2414 I915_WRITE(GTIIR, I915_READ(GTIIR));
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002415
2416 gen5_gt_irq_preinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002417
2418 I915_WRITE(DPINVGTT, 0xff);
2419
2420 I915_WRITE(PORT_HOTPLUG_EN, 0);
2421 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2422 for_each_pipe(pipe)
2423 I915_WRITE(PIPESTAT(pipe), 0xffff);
2424 I915_WRITE(VLV_IIR, 0xffffffff);
2425 I915_WRITE(VLV_IMR, 0xffffffff);
2426 I915_WRITE(VLV_IER, 0x0);
2427 POSTING_READ(VLV_IER);
2428}
2429
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002430static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002431{
2432 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002433 struct drm_mode_config *mode_config = &dev->mode_config;
2434 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02002435 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07002436
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002437 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002438 hotplug_irqs = SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002439 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002440 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002441 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002442 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002443 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002444 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002445 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002446 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002447 }
2448
Daniel Vetterfee884e2013-07-04 23:35:21 +02002449 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002450
2451 /*
2452 * Enable digital hotplug on the PCH, and configure the DP short pulse
2453 * duration to 2ms (which is the minimum in the Display Port spec)
2454 *
2455 * This register is the same on all known PCH chips.
2456 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002457 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2458 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2459 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2460 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2461 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2462 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2463}
2464
Paulo Zanonid46da432013-02-08 17:35:15 -02002465static void ibx_irq_postinstall(struct drm_device *dev)
2466{
2467 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002468 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02002469
Daniel Vetter692a04c2013-05-29 21:43:05 +02002470 if (HAS_PCH_NOP(dev))
2471 return;
2472
Paulo Zanoni86642812013-04-12 17:57:57 -03002473 if (HAS_PCH_IBX(dev)) {
2474 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
Paulo Zanonide032bf2013-04-12 17:57:58 -03002475 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
Paulo Zanoni86642812013-04-12 17:57:57 -03002476 } else {
2477 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2478
2479 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2480 }
Ben Widawskyab5c6082013-04-05 13:12:41 -07002481
Paulo Zanonid46da432013-02-08 17:35:15 -02002482 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2483 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02002484}
2485
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002486static void gen5_gt_irq_postinstall(struct drm_device *dev)
2487{
2488 struct drm_i915_private *dev_priv = dev->dev_private;
2489 u32 pm_irqs, gt_irqs;
2490
2491 pm_irqs = gt_irqs = 0;
2492
2493 dev_priv->gt_irq_mask = ~0;
Ben Widawsky040d2ba2013-09-19 11:01:40 -07002494 if (HAS_L3_DPF(dev)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002495 /* L3 parity interrupt is always unmasked. */
Ben Widawsky35a85ac2013-09-19 11:13:41 -07002496 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2497 gt_irqs |= GT_PARITY_ERROR(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002498 }
2499
2500 gt_irqs |= GT_RENDER_USER_INTERRUPT;
2501 if (IS_GEN5(dev)) {
2502 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2503 ILK_BSD_USER_INTERRUPT;
2504 } else {
2505 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2506 }
2507
2508 I915_WRITE(GTIIR, I915_READ(GTIIR));
2509 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2510 I915_WRITE(GTIER, gt_irqs);
2511 POSTING_READ(GTIER);
2512
2513 if (INTEL_INFO(dev)->gen >= 6) {
2514 pm_irqs |= GEN6_PM_RPS_EVENTS;
2515
2516 if (HAS_VEBOX(dev))
2517 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2518
Paulo Zanoni605cd252013-08-06 18:57:15 -03002519 dev_priv->pm_irq_mask = 0xffffffff;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002520 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
Paulo Zanoni605cd252013-08-06 18:57:15 -03002521 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002522 I915_WRITE(GEN6_PMIER, pm_irqs);
2523 POSTING_READ(GEN6_PMIER);
2524 }
2525}
2526
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002527static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002528{
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002529 unsigned long irqflags;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002531 u32 display_mask, extra_mask;
2532
2533 if (INTEL_INFO(dev)->gen >= 7) {
2534 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2535 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2536 DE_PLANEB_FLIP_DONE_IVB |
2537 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2538 DE_ERR_INT_IVB);
2539 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2540 DE_PIPEA_VBLANK_IVB);
2541
2542 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2543 } else {
2544 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2545 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02002546 DE_AUX_CHANNEL_A |
2547 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2548 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2549 DE_POISON);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002550 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2551 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002552
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002553 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002554
2555 /* should always can generate irq */
2556 I915_WRITE(DEIIR, I915_READ(DEIIR));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002557 I915_WRITE(DEIMR, dev_priv->irq_mask);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002558 I915_WRITE(DEIER, display_mask | extra_mask);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002559 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002560
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002561 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002562
Paulo Zanonid46da432013-02-08 17:35:15 -02002563 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07002564
Jesse Barnesf97108d2010-01-29 11:27:07 -08002565 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02002566 /* Enable PCU event interrupts
2567 *
2568 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002569 * setup is guaranteed to run in single-threaded context. But we
2570 * need it to make the assert_spin_locked happy. */
2571 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002572 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002573 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002574 }
2575
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002576 return 0;
2577}
2578
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002579static int valleyview_irq_postinstall(struct drm_device *dev)
2580{
2581 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002582 u32 enable_mask;
Daniel Vetter379ef822013-10-16 22:55:56 +02002583 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2584 PIPE_CRC_DONE_ENABLE;
Daniel Vetterb79480b2013-06-27 17:52:10 +02002585 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002586
2587 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002588 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2589 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2590 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002591 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2592
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002593 /*
2594 *Leave vblank interrupts masked initially. enable/disable will
2595 * toggle them based on usage.
2596 */
2597 dev_priv->irq_mask = (~enable_mask) |
2598 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2599 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002600
Daniel Vetter20afbda2012-12-11 14:05:07 +01002601 I915_WRITE(PORT_HOTPLUG_EN, 0);
2602 POSTING_READ(PORT_HOTPLUG_EN);
2603
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002604 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2605 I915_WRITE(VLV_IER, enable_mask);
2606 I915_WRITE(VLV_IIR, 0xffffffff);
2607 I915_WRITE(PIPESTAT(0), 0xffff);
2608 I915_WRITE(PIPESTAT(1), 0xffff);
2609 POSTING_READ(VLV_IER);
2610
Daniel Vetterb79480b2013-06-27 17:52:10 +02002611 /* Interrupt setup is already guaranteed to be single-threaded, this is
2612 * just to make the assert_spin_locked check happy. */
2613 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002614 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2615 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2616 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
Daniel Vetterb79480b2013-06-27 17:52:10 +02002617 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002618
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002619 I915_WRITE(VLV_IIR, 0xffffffff);
2620 I915_WRITE(VLV_IIR, 0xffffffff);
2621
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002622 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002623
2624 /* ack & enable invalid PTE error interrupts */
2625#if 0 /* FIXME: add support to irq handler for checking these bits */
2626 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2627 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2628#endif
2629
2630 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002631
2632 return 0;
2633}
2634
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002635static void valleyview_irq_uninstall(struct drm_device *dev)
2636{
2637 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2638 int pipe;
2639
2640 if (!dev_priv)
2641 return;
2642
Egbert Eichac4c16c2013-04-16 13:36:58 +02002643 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2644
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002645 for_each_pipe(pipe)
2646 I915_WRITE(PIPESTAT(pipe), 0xffff);
2647
2648 I915_WRITE(HWSTAM, 0xffffffff);
2649 I915_WRITE(PORT_HOTPLUG_EN, 0);
2650 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2651 for_each_pipe(pipe)
2652 I915_WRITE(PIPESTAT(pipe), 0xffff);
2653 I915_WRITE(VLV_IIR, 0xffffffff);
2654 I915_WRITE(VLV_IMR, 0xffffffff);
2655 I915_WRITE(VLV_IER, 0x0);
2656 POSTING_READ(VLV_IER);
2657}
2658
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002659static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002660{
2661 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07002662
2663 if (!dev_priv)
2664 return;
2665
Egbert Eichac4c16c2013-04-16 13:36:58 +02002666 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2667
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002668 I915_WRITE(HWSTAM, 0xffffffff);
2669
2670 I915_WRITE(DEIMR, 0xffffffff);
2671 I915_WRITE(DEIER, 0x0);
2672 I915_WRITE(DEIIR, I915_READ(DEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002673 if (IS_GEN7(dev))
2674 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002675
2676 I915_WRITE(GTIMR, 0xffffffff);
2677 I915_WRITE(GTIER, 0x0);
2678 I915_WRITE(GTIIR, I915_READ(GTIIR));
Keith Packard192aac1f2011-09-20 10:12:44 -07002679
Ben Widawskyab5c6082013-04-05 13:12:41 -07002680 if (HAS_PCH_NOP(dev))
2681 return;
2682
Keith Packard192aac1f2011-09-20 10:12:44 -07002683 I915_WRITE(SDEIMR, 0xffffffff);
2684 I915_WRITE(SDEIER, 0x0);
2685 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002686 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2687 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002688}
2689
Chris Wilsonc2798b12012-04-22 21:13:57 +01002690static void i8xx_irq_preinstall(struct drm_device * dev)
2691{
2692 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2693 int pipe;
2694
2695 atomic_set(&dev_priv->irq_received, 0);
2696
2697 for_each_pipe(pipe)
2698 I915_WRITE(PIPESTAT(pipe), 0);
2699 I915_WRITE16(IMR, 0xffff);
2700 I915_WRITE16(IER, 0x0);
2701 POSTING_READ16(IER);
2702}
2703
2704static int i8xx_irq_postinstall(struct drm_device *dev)
2705{
2706 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter379ef822013-10-16 22:55:56 +02002707 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01002708
Chris Wilsonc2798b12012-04-22 21:13:57 +01002709 I915_WRITE16(EMR,
2710 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2711
2712 /* Unmask the interrupts that we always want on. */
2713 dev_priv->irq_mask =
2714 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2715 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2716 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2717 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2718 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2719 I915_WRITE16(IMR, dev_priv->irq_mask);
2720
2721 I915_WRITE16(IER,
2722 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2723 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2724 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2725 I915_USER_INTERRUPT);
2726 POSTING_READ16(IER);
2727
Daniel Vetter379ef822013-10-16 22:55:56 +02002728 /* Interrupt setup is already guaranteed to be single-threaded, this is
2729 * just to make the assert_spin_locked check happy. */
2730 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002731 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2732 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
Daniel Vetter379ef822013-10-16 22:55:56 +02002733 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2734
Chris Wilsonc2798b12012-04-22 21:13:57 +01002735 return 0;
2736}
2737
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002738/*
2739 * Returns true when a page flip has completed.
2740 */
2741static bool i8xx_handle_vblank(struct drm_device *dev,
2742 int pipe, u16 iir)
2743{
2744 drm_i915_private_t *dev_priv = dev->dev_private;
2745 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2746
2747 if (!drm_handle_vblank(dev, pipe))
2748 return false;
2749
2750 if ((iir & flip_pending) == 0)
2751 return false;
2752
2753 intel_prepare_page_flip(dev, pipe);
2754
2755 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2756 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2757 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2758 * the flip is completed (no longer pending). Since this doesn't raise
2759 * an interrupt per se, we watch for the change at vblank.
2760 */
2761 if (I915_READ16(ISR) & flip_pending)
2762 return false;
2763
2764 intel_finish_page_flip(dev, pipe);
2765
2766 return true;
2767}
2768
Daniel Vetterff1f5252012-10-02 15:10:55 +02002769static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01002770{
2771 struct drm_device *dev = (struct drm_device *) arg;
2772 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01002773 u16 iir, new_iir;
2774 u32 pipe_stats[2];
2775 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01002776 int pipe;
2777 u16 flip_mask =
2778 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2779 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2780
2781 atomic_inc(&dev_priv->irq_received);
2782
2783 iir = I915_READ16(IIR);
2784 if (iir == 0)
2785 return IRQ_NONE;
2786
2787 while (iir & ~flip_mask) {
2788 /* Can't rely on pipestat interrupt bit in iir as it might
2789 * have been cleared after the pipestat interrupt was received.
2790 * It doesn't set the bit in iir again, but it still produces
2791 * interrupts (for non-MSI).
2792 */
2793 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2794 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2795 i915_handle_error(dev, false);
2796
2797 for_each_pipe(pipe) {
2798 int reg = PIPESTAT(pipe);
2799 pipe_stats[pipe] = I915_READ(reg);
2800
2801 /*
2802 * Clear the PIPE*STAT regs before the IIR
2803 */
2804 if (pipe_stats[pipe] & 0x8000ffff) {
2805 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2806 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2807 pipe_name(pipe));
2808 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01002809 }
2810 }
2811 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2812
2813 I915_WRITE16(IIR, iir & ~flip_mask);
2814 new_iir = I915_READ16(IIR); /* Flush posted writes */
2815
Daniel Vetterd05c6172012-04-26 23:28:09 +02002816 i915_update_dri1_breadcrumb(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01002817
2818 if (iir & I915_USER_INTERRUPT)
2819 notify_ring(dev, &dev_priv->ring[RCS]);
2820
Daniel Vetter4356d582013-10-16 22:55:55 +02002821 for_each_pipe(pipe) {
2822 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2823 i8xx_handle_vblank(dev, pipe, iir))
2824 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsonc2798b12012-04-22 21:13:57 +01002825
Daniel Vetter4356d582013-10-16 22:55:55 +02002826 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02002827 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter4356d582013-10-16 22:55:55 +02002828 }
Chris Wilsonc2798b12012-04-22 21:13:57 +01002829
2830 iir = new_iir;
2831 }
2832
2833 return IRQ_HANDLED;
2834}
2835
2836static void i8xx_irq_uninstall(struct drm_device * dev)
2837{
2838 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2839 int pipe;
2840
Chris Wilsonc2798b12012-04-22 21:13:57 +01002841 for_each_pipe(pipe) {
2842 /* Clear enable bits; then clear status bits */
2843 I915_WRITE(PIPESTAT(pipe), 0);
2844 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2845 }
2846 I915_WRITE16(IMR, 0xffff);
2847 I915_WRITE16(IER, 0x0);
2848 I915_WRITE16(IIR, I915_READ16(IIR));
2849}
2850
Chris Wilsona266c7d2012-04-24 22:59:44 +01002851static void i915_irq_preinstall(struct drm_device * dev)
2852{
2853 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2854 int pipe;
2855
2856 atomic_set(&dev_priv->irq_received, 0);
2857
2858 if (I915_HAS_HOTPLUG(dev)) {
2859 I915_WRITE(PORT_HOTPLUG_EN, 0);
2860 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2861 }
2862
Chris Wilson00d98eb2012-04-24 22:59:48 +01002863 I915_WRITE16(HWSTAM, 0xeffe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002864 for_each_pipe(pipe)
2865 I915_WRITE(PIPESTAT(pipe), 0);
2866 I915_WRITE(IMR, 0xffffffff);
2867 I915_WRITE(IER, 0x0);
2868 POSTING_READ(IER);
2869}
2870
2871static int i915_irq_postinstall(struct drm_device *dev)
2872{
2873 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01002874 u32 enable_mask;
Daniel Vetter379ef822013-10-16 22:55:56 +02002875 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002876
Chris Wilson38bde182012-04-24 22:59:50 +01002877 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2878
2879 /* Unmask the interrupts that we always want on. */
2880 dev_priv->irq_mask =
2881 ~(I915_ASLE_INTERRUPT |
2882 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2883 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2884 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2885 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2886 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2887
2888 enable_mask =
2889 I915_ASLE_INTERRUPT |
2890 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2891 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2892 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2893 I915_USER_INTERRUPT;
2894
Chris Wilsona266c7d2012-04-24 22:59:44 +01002895 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01002896 I915_WRITE(PORT_HOTPLUG_EN, 0);
2897 POSTING_READ(PORT_HOTPLUG_EN);
2898
Chris Wilsona266c7d2012-04-24 22:59:44 +01002899 /* Enable in IER... */
2900 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2901 /* and unmask in IMR */
2902 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2903 }
2904
Chris Wilsona266c7d2012-04-24 22:59:44 +01002905 I915_WRITE(IMR, dev_priv->irq_mask);
2906 I915_WRITE(IER, enable_mask);
2907 POSTING_READ(IER);
2908
Jani Nikulaf49e38d2013-04-29 13:02:54 +03002909 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002910
Daniel Vetter379ef822013-10-16 22:55:56 +02002911 /* Interrupt setup is already guaranteed to be single-threaded, this is
2912 * just to make the assert_spin_locked check happy. */
2913 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002914 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2915 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
Daniel Vetter379ef822013-10-16 22:55:56 +02002916 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2917
Daniel Vetter20afbda2012-12-11 14:05:07 +01002918 return 0;
2919}
2920
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002921/*
2922 * Returns true when a page flip has completed.
2923 */
2924static bool i915_handle_vblank(struct drm_device *dev,
2925 int plane, int pipe, u32 iir)
2926{
2927 drm_i915_private_t *dev_priv = dev->dev_private;
2928 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2929
2930 if (!drm_handle_vblank(dev, pipe))
2931 return false;
2932
2933 if ((iir & flip_pending) == 0)
2934 return false;
2935
2936 intel_prepare_page_flip(dev, plane);
2937
2938 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2939 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2940 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2941 * the flip is completed (no longer pending). Since this doesn't raise
2942 * an interrupt per se, we watch for the change at vblank.
2943 */
2944 if (I915_READ(ISR) & flip_pending)
2945 return false;
2946
2947 intel_finish_page_flip(dev, pipe);
2948
2949 return true;
2950}
2951
Daniel Vetterff1f5252012-10-02 15:10:55 +02002952static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01002953{
2954 struct drm_device *dev = (struct drm_device *) arg;
2955 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01002956 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01002957 unsigned long irqflags;
Chris Wilson38bde182012-04-24 22:59:50 +01002958 u32 flip_mask =
2959 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2960 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01002961 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002962
2963 atomic_inc(&dev_priv->irq_received);
2964
2965 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01002966 do {
2967 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01002968 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002969
2970 /* Can't rely on pipestat interrupt bit in iir as it might
2971 * have been cleared after the pipestat interrupt was received.
2972 * It doesn't set the bit in iir again, but it still produces
2973 * interrupts (for non-MSI).
2974 */
2975 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2976 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2977 i915_handle_error(dev, false);
2978
2979 for_each_pipe(pipe) {
2980 int reg = PIPESTAT(pipe);
2981 pipe_stats[pipe] = I915_READ(reg);
2982
Chris Wilson38bde182012-04-24 22:59:50 +01002983 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01002984 if (pipe_stats[pipe] & 0x8000ffff) {
2985 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2986 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2987 pipe_name(pipe));
2988 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01002989 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002990 }
2991 }
2992 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2993
2994 if (!irq_received)
2995 break;
2996
Chris Wilsona266c7d2012-04-24 22:59:44 +01002997 /* Consume port. Then clear IIR or we'll miss events */
2998 if ((I915_HAS_HOTPLUG(dev)) &&
2999 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3000 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02003001 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003002
3003 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3004 hotplug_status);
Daniel Vetter91d131d2013-06-27 17:52:14 +02003005
3006 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3007
Chris Wilsona266c7d2012-04-24 22:59:44 +01003008 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
Chris Wilson38bde182012-04-24 22:59:50 +01003009 POSTING_READ(PORT_HOTPLUG_STAT);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003010 }
3011
Chris Wilson38bde182012-04-24 22:59:50 +01003012 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003013 new_iir = I915_READ(IIR); /* Flush posted writes */
3014
Chris Wilsona266c7d2012-04-24 22:59:44 +01003015 if (iir & I915_USER_INTERRUPT)
3016 notify_ring(dev, &dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003017
Chris Wilsona266c7d2012-04-24 22:59:44 +01003018 for_each_pipe(pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01003019 int plane = pipe;
3020 if (IS_MOBILE(dev))
3021 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02003022
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003023 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3024 i915_handle_vblank(dev, plane, pipe, iir))
3025 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003026
3027 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3028 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003029
3030 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003031 i9xx_pipe_crc_irq_handler(dev, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003032 }
3033
Chris Wilsona266c7d2012-04-24 22:59:44 +01003034 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3035 intel_opregion_asle_intr(dev);
3036
3037 /* With MSI, interrupts are only generated when iir
3038 * transitions from zero to nonzero. If another bit got
3039 * set while we were handling the existing iir bits, then
3040 * we would never get another interrupt.
3041 *
3042 * This is fine on non-MSI as well, as if we hit this path
3043 * we avoid exiting the interrupt handler only to generate
3044 * another one.
3045 *
3046 * Note that for MSI this could cause a stray interrupt report
3047 * if an interrupt landed in the time between writing IIR and
3048 * the posting read. This should be rare enough to never
3049 * trigger the 99% of 100,000 interrupts test for disabling
3050 * stray interrupts.
3051 */
Chris Wilson38bde182012-04-24 22:59:50 +01003052 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003053 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003054 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003055
Daniel Vetterd05c6172012-04-26 23:28:09 +02003056 i915_update_dri1_breadcrumb(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01003057
Chris Wilsona266c7d2012-04-24 22:59:44 +01003058 return ret;
3059}
3060
3061static void i915_irq_uninstall(struct drm_device * dev)
3062{
3063 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3064 int pipe;
3065
Egbert Eichac4c16c2013-04-16 13:36:58 +02003066 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3067
Chris Wilsona266c7d2012-04-24 22:59:44 +01003068 if (I915_HAS_HOTPLUG(dev)) {
3069 I915_WRITE(PORT_HOTPLUG_EN, 0);
3070 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3071 }
3072
Chris Wilson00d98eb2012-04-24 22:59:48 +01003073 I915_WRITE16(HWSTAM, 0xffff);
Chris Wilson55b39752012-04-24 22:59:49 +01003074 for_each_pipe(pipe) {
3075 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003076 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003077 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3078 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003079 I915_WRITE(IMR, 0xffffffff);
3080 I915_WRITE(IER, 0x0);
3081
Chris Wilsona266c7d2012-04-24 22:59:44 +01003082 I915_WRITE(IIR, I915_READ(IIR));
3083}
3084
3085static void i965_irq_preinstall(struct drm_device * dev)
3086{
3087 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3088 int pipe;
3089
3090 atomic_set(&dev_priv->irq_received, 0);
3091
Chris Wilsonadca4732012-05-11 18:01:31 +01003092 I915_WRITE(PORT_HOTPLUG_EN, 0);
3093 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003094
3095 I915_WRITE(HWSTAM, 0xeffe);
3096 for_each_pipe(pipe)
3097 I915_WRITE(PIPESTAT(pipe), 0);
3098 I915_WRITE(IMR, 0xffffffff);
3099 I915_WRITE(IER, 0x0);
3100 POSTING_READ(IER);
3101}
3102
3103static int i965_irq_postinstall(struct drm_device *dev)
3104{
3105 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003106 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003107 u32 error_mask;
Daniel Vetterb79480b2013-06-27 17:52:10 +02003108 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003109
Chris Wilsona266c7d2012-04-24 22:59:44 +01003110 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003111 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01003112 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003113 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3114 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3115 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3116 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3117 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3118
3119 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003120 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3121 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003122 enable_mask |= I915_USER_INTERRUPT;
3123
3124 if (IS_G4X(dev))
3125 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003126
Daniel Vetterb79480b2013-06-27 17:52:10 +02003127 /* Interrupt setup is already guaranteed to be single-threaded, this is
3128 * just to make the assert_spin_locked check happy. */
3129 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02003130 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3131 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3132 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
Daniel Vetterb79480b2013-06-27 17:52:10 +02003133 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003134
Chris Wilsona266c7d2012-04-24 22:59:44 +01003135 /*
3136 * Enable some error detection, note the instruction error mask
3137 * bit is reserved, so we leave it masked.
3138 */
3139 if (IS_G4X(dev)) {
3140 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3141 GM45_ERROR_MEM_PRIV |
3142 GM45_ERROR_CP_PRIV |
3143 I915_ERROR_MEMORY_REFRESH);
3144 } else {
3145 error_mask = ~(I915_ERROR_PAGE_TABLE |
3146 I915_ERROR_MEMORY_REFRESH);
3147 }
3148 I915_WRITE(EMR, error_mask);
3149
3150 I915_WRITE(IMR, dev_priv->irq_mask);
3151 I915_WRITE(IER, enable_mask);
3152 POSTING_READ(IER);
3153
Daniel Vetter20afbda2012-12-11 14:05:07 +01003154 I915_WRITE(PORT_HOTPLUG_EN, 0);
3155 POSTING_READ(PORT_HOTPLUG_EN);
3156
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003157 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003158
3159 return 0;
3160}
3161
Egbert Eichbac56d52013-02-25 12:06:51 -05003162static void i915_hpd_irq_setup(struct drm_device *dev)
Daniel Vetter20afbda2012-12-11 14:05:07 +01003163{
3164 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Egbert Eiche5868a32013-02-28 04:17:12 -05003165 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichcd569ae2013-04-16 13:36:57 +02003166 struct intel_encoder *intel_encoder;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003167 u32 hotplug_en;
3168
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003169 assert_spin_locked(&dev_priv->irq_lock);
3170
Egbert Eichbac56d52013-02-25 12:06:51 -05003171 if (I915_HAS_HOTPLUG(dev)) {
3172 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3173 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3174 /* Note HDMI and DP share hotplug bits */
Egbert Eiche5868a32013-02-28 04:17:12 -05003175 /* enable bits are the same for all generations */
Egbert Eichcd569ae2013-04-16 13:36:57 +02003176 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3177 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3178 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
Egbert Eichbac56d52013-02-25 12:06:51 -05003179 /* Programming the CRT detection parameters tends
3180 to generate a spurious hotplug event about three
3181 seconds later. So just do it once.
3182 */
3183 if (IS_G4X(dev))
3184 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
Daniel Vetter85fc95b2013-03-27 15:47:11 +01003185 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
Egbert Eichbac56d52013-02-25 12:06:51 -05003186 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003187
Egbert Eichbac56d52013-02-25 12:06:51 -05003188 /* Ignore TV since it's buggy */
3189 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3190 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003191}
3192
Daniel Vetterff1f5252012-10-02 15:10:55 +02003193static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003194{
3195 struct drm_device *dev = (struct drm_device *) arg;
3196 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003197 u32 iir, new_iir;
3198 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003199 unsigned long irqflags;
3200 int irq_received;
3201 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003202 u32 flip_mask =
3203 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3204 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003205
3206 atomic_inc(&dev_priv->irq_received);
3207
3208 iir = I915_READ(IIR);
3209
Chris Wilsona266c7d2012-04-24 22:59:44 +01003210 for (;;) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01003211 bool blc_event = false;
3212
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003213 irq_received = (iir & ~flip_mask) != 0;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003214
3215 /* Can't rely on pipestat interrupt bit in iir as it might
3216 * have been cleared after the pipestat interrupt was received.
3217 * It doesn't set the bit in iir again, but it still produces
3218 * interrupts (for non-MSI).
3219 */
3220 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3221 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3222 i915_handle_error(dev, false);
3223
3224 for_each_pipe(pipe) {
3225 int reg = PIPESTAT(pipe);
3226 pipe_stats[pipe] = I915_READ(reg);
3227
3228 /*
3229 * Clear the PIPE*STAT regs before the IIR
3230 */
3231 if (pipe_stats[pipe] & 0x8000ffff) {
3232 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3233 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3234 pipe_name(pipe));
3235 I915_WRITE(reg, pipe_stats[pipe]);
3236 irq_received = 1;
3237 }
3238 }
3239 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3240
3241 if (!irq_received)
3242 break;
3243
3244 ret = IRQ_HANDLED;
3245
3246 /* Consume port. Then clear IIR or we'll miss events */
Chris Wilsonadca4732012-05-11 18:01:31 +01003247 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003248 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02003249 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3250 HOTPLUG_INT_STATUS_G4X :
Daniel Vetter4f7fd702013-06-24 21:33:28 +02003251 HOTPLUG_INT_STATUS_I915);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003252
3253 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3254 hotplug_status);
Daniel Vetter91d131d2013-06-27 17:52:14 +02003255
3256 intel_hpd_irq_handler(dev, hotplug_trigger,
3257 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3258
Chris Wilsona266c7d2012-04-24 22:59:44 +01003259 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3260 I915_READ(PORT_HOTPLUG_STAT);
3261 }
3262
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003263 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003264 new_iir = I915_READ(IIR); /* Flush posted writes */
3265
Chris Wilsona266c7d2012-04-24 22:59:44 +01003266 if (iir & I915_USER_INTERRUPT)
3267 notify_ring(dev, &dev_priv->ring[RCS]);
3268 if (iir & I915_BSD_USER_INTERRUPT)
3269 notify_ring(dev, &dev_priv->ring[VCS]);
3270
Chris Wilsona266c7d2012-04-24 22:59:44 +01003271 for_each_pipe(pipe) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01003272 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003273 i915_handle_vblank(dev, pipe, pipe, iir))
3274 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003275
3276 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3277 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003278
3279 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003280 i9xx_pipe_crc_irq_handler(dev, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003281 }
3282
3283
3284 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3285 intel_opregion_asle_intr(dev);
3286
Daniel Vetter515ac2b2012-12-01 13:53:44 +01003287 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3288 gmbus_irq_handler(dev);
3289
Chris Wilsona266c7d2012-04-24 22:59:44 +01003290 /* With MSI, interrupts are only generated when iir
3291 * transitions from zero to nonzero. If another bit got
3292 * set while we were handling the existing iir bits, then
3293 * we would never get another interrupt.
3294 *
3295 * This is fine on non-MSI as well, as if we hit this path
3296 * we avoid exiting the interrupt handler only to generate
3297 * another one.
3298 *
3299 * Note that for MSI this could cause a stray interrupt report
3300 * if an interrupt landed in the time between writing IIR and
3301 * the posting read. This should be rare enough to never
3302 * trigger the 99% of 100,000 interrupts test for disabling
3303 * stray interrupts.
3304 */
3305 iir = new_iir;
3306 }
3307
Daniel Vetterd05c6172012-04-26 23:28:09 +02003308 i915_update_dri1_breadcrumb(dev);
Chris Wilson2c8ba292012-04-24 22:59:46 +01003309
Chris Wilsona266c7d2012-04-24 22:59:44 +01003310 return ret;
3311}
3312
3313static void i965_irq_uninstall(struct drm_device * dev)
3314{
3315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3316 int pipe;
3317
3318 if (!dev_priv)
3319 return;
3320
Egbert Eichac4c16c2013-04-16 13:36:58 +02003321 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3322
Chris Wilsonadca4732012-05-11 18:01:31 +01003323 I915_WRITE(PORT_HOTPLUG_EN, 0);
3324 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003325
3326 I915_WRITE(HWSTAM, 0xffffffff);
3327 for_each_pipe(pipe)
3328 I915_WRITE(PIPESTAT(pipe), 0);
3329 I915_WRITE(IMR, 0xffffffff);
3330 I915_WRITE(IER, 0x0);
3331
3332 for_each_pipe(pipe)
3333 I915_WRITE(PIPESTAT(pipe),
3334 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3335 I915_WRITE(IIR, I915_READ(IIR));
3336}
3337
Egbert Eichac4c16c2013-04-16 13:36:58 +02003338static void i915_reenable_hotplug_timer_func(unsigned long data)
3339{
3340 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3341 struct drm_device *dev = dev_priv->dev;
3342 struct drm_mode_config *mode_config = &dev->mode_config;
3343 unsigned long irqflags;
3344 int i;
3345
3346 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3347 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3348 struct drm_connector *connector;
3349
3350 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3351 continue;
3352
3353 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3354
3355 list_for_each_entry(connector, &mode_config->connector_list, head) {
3356 struct intel_connector *intel_connector = to_intel_connector(connector);
3357
3358 if (intel_connector->encoder->hpd_pin == i) {
3359 if (connector->polled != intel_connector->polled)
3360 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3361 drm_get_connector_name(connector));
3362 connector->polled = intel_connector->polled;
3363 if (!connector->polled)
3364 connector->polled = DRM_CONNECTOR_POLL_HPD;
3365 }
3366 }
3367 }
3368 if (dev_priv->display.hpd_irq_setup)
3369 dev_priv->display.hpd_irq_setup(dev);
3370 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3371}
3372
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003373void intel_irq_init(struct drm_device *dev)
3374{
Chris Wilson8b2e3262012-04-24 22:59:41 +01003375 struct drm_i915_private *dev_priv = dev->dev_private;
3376
3377 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01003378 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003379 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01003380 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01003381
Daniel Vetter99584db2012-11-14 17:14:04 +01003382 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3383 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01003384 (unsigned long) dev);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003385 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3386 (unsigned long) dev_priv);
Daniel Vetter61bac782012-12-01 21:03:21 +01003387
Tomas Janousek97a19a22012-12-08 13:48:13 +01003388 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01003389
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +03003390 if (IS_GEN2(dev)) {
3391 dev->max_vblank_count = 0;
3392 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3393 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003394 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3395 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
Ville Syrjälä391f75e2013-09-25 19:55:26 +03003396 } else {
3397 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3398 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003399 }
3400
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03003401 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
Keith Packardc3613de2011-08-12 17:05:54 -07003402 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03003403 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3404 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003405
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003406 if (IS_VALLEYVIEW(dev)) {
3407 dev->driver->irq_handler = valleyview_irq_handler;
3408 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3409 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3410 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3411 dev->driver->enable_vblank = valleyview_enable_vblank;
3412 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05003413 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003414 } else if (HAS_PCH_SPLIT(dev)) {
3415 dev->driver->irq_handler = ironlake_irq_handler;
3416 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3417 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3418 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3419 dev->driver->enable_vblank = ironlake_enable_vblank;
3420 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003421 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003422 } else {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003423 if (INTEL_INFO(dev)->gen == 2) {
3424 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3425 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3426 dev->driver->irq_handler = i8xx_irq_handler;
3427 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003428 } else if (INTEL_INFO(dev)->gen == 3) {
3429 dev->driver->irq_preinstall = i915_irq_preinstall;
3430 dev->driver->irq_postinstall = i915_irq_postinstall;
3431 dev->driver->irq_uninstall = i915_irq_uninstall;
3432 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003433 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003434 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003435 dev->driver->irq_preinstall = i965_irq_preinstall;
3436 dev->driver->irq_postinstall = i965_irq_postinstall;
3437 dev->driver->irq_uninstall = i965_irq_uninstall;
3438 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05003439 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003440 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003441 dev->driver->enable_vblank = i915_enable_vblank;
3442 dev->driver->disable_vblank = i915_disable_vblank;
3443 }
3444}
Daniel Vetter20afbda2012-12-11 14:05:07 +01003445
3446void intel_hpd_init(struct drm_device *dev)
3447{
3448 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eich821450c2013-04-16 13:36:55 +02003449 struct drm_mode_config *mode_config = &dev->mode_config;
3450 struct drm_connector *connector;
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003451 unsigned long irqflags;
Egbert Eich821450c2013-04-16 13:36:55 +02003452 int i;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003453
Egbert Eich821450c2013-04-16 13:36:55 +02003454 for (i = 1; i < HPD_NUM_PINS; i++) {
3455 dev_priv->hpd_stats[i].hpd_cnt = 0;
3456 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3457 }
3458 list_for_each_entry(connector, &mode_config->connector_list, head) {
3459 struct intel_connector *intel_connector = to_intel_connector(connector);
3460 connector->polled = intel_connector->polled;
3461 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3462 connector->polled = DRM_CONNECTOR_POLL_HPD;
3463 }
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003464
3465 /* Interrupt setup is already guaranteed to be single-threaded, this is
3466 * just to make the assert_spin_locked checks happy. */
3467 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003468 if (dev_priv->display.hpd_irq_setup)
3469 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003470 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003471}
Paulo Zanonic67a4702013-08-19 13:18:09 -03003472
3473/* Disable interrupts so we can allow Package C8+. */
3474void hsw_pc8_disable_interrupts(struct drm_device *dev)
3475{
3476 struct drm_i915_private *dev_priv = dev->dev_private;
3477 unsigned long irqflags;
3478
3479 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3480
3481 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3482 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3483 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3484 dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3485 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3486
3487 ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3488 ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3489 ilk_disable_gt_irq(dev_priv, 0xffffffff);
3490 snb_disable_pm_irq(dev_priv, 0xffffffff);
3491
3492 dev_priv->pc8.irqs_disabled = true;
3493
3494 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3495}
3496
3497/* Restore interrupts so we can recover from Package C8+. */
3498void hsw_pc8_restore_interrupts(struct drm_device *dev)
3499{
3500 struct drm_i915_private *dev_priv = dev->dev_private;
3501 unsigned long irqflags;
3502 uint32_t val, expected;
3503
3504 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3505
3506 val = I915_READ(DEIMR);
3507 expected = ~DE_PCH_EVENT_IVB;
3508 WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3509
3510 val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3511 expected = ~SDE_HOTPLUG_MASK_CPT;
3512 WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3513 val, expected);
3514
3515 val = I915_READ(GTIMR);
3516 expected = 0xffffffff;
3517 WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3518
3519 val = I915_READ(GEN6_PMIMR);
3520 expected = 0xffffffff;
3521 WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3522 expected);
3523
3524 dev_priv->pc8.irqs_disabled = false;
3525
3526 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3527 ibx_enable_display_interrupt(dev_priv,
3528 ~dev_priv->pc8.regsave.sdeimr &
3529 ~SDE_HOTPLUG_MASK_CPT);
3530 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3531 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3532 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3533
3534 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3535}