/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
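
/*
 * Worked example of the interrupt_mask/enabled_irq_mask convention used by
 * ilk_update_gt_irq() and snb_update_pm_irq() above (illustrative sketch
 * only, not extra driver logic): a set bit in an IMR register masks, i.e.
 * disables, that interrupt, so the update amounts to
 *
 *	new_imr = (old_imr & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask);
 *
 * With interrupt_mask = 0x3 and enabled_irq_mask = 0x1, bit 0 ends up
 * cleared in the IMR (interrupt enabled) and bit 1 ends up set (interrupt
 * masked), while bits outside interrupt_mask keep their previous value.
 * The *_enable_* wrappers simply pass mask for both arguments and the
 * *_disable_* wrappers pass 0 as enabled_irq_mask.
 */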

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
	POSTING_READ(reg);
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
		i9xx_clear_fifo_underrun(dev, pipe);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
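
/*
 * Illustrative caller pattern (a hedged sketch, not code taken from this
 * tree): the previous state is returned so that a caller can temporarily
 * suppress underrun reporting around a sequence that is expected to glitch
 * and then restore whatever was configured before, roughly:
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... sequence that may trigger spurious underruns ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */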

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet, on pipe B the
	 * same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
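
/*
 * Layout note with a small example (informational sketch only): in the
 * PIPESTAT registers the enable bits live in the high 16 bits and the
 * corresponding status bits in the low 16 bits, which is why the generic
 * path above simply uses "enable_mask = status_mask << 16". For instance a
 * status_mask of PIPE_VBLANK_INTERRUPT_STATUS (bit 1) maps to the enable
 * bit at position 17. Only VLV needs the translation in
 * vlv_get_pipestat_enable_mask(), because a few of its enable bits do not
 * line up with that simple shift.
 */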

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
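
/*
 * Numerical example of the "cooked" vblank counter above (illustrative
 * only): the hardware frame counter is 24 bits, assembled as
 * (high1 << 8) | low. Because it only increments at the start of active,
 * a query issued after vblank start but before the next frame has begun
 * would otherwise lag by one. So if the frame counter reads N but the
 * pixel counter has already reached vbl_start (in pixels), the function
 * reports N + 1; earlier in the frame it reports N. The result is masked
 * to 24 bits (& 0xffffff) to match the hardware counter width.
 */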

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;

	if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}
	}

	return __raw_i915_read32(dev_priv, DEISR) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
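
/*
 * Example of the position normalisation above (illustrative numbers only):
 * with vtotal = 806, vbl_start = 768 and vbl_end = 806, a scanline of 800
 * is inside vblank and becomes 800 - 806 = -6, i.e. six lines before the
 * upcoming active period, while a scanline of 100 is outside vblank and
 * stays at 100 + (806 - 806) = 100. Callers such as
 * drm_calc_vbltimestamp_from_scanoutpos() rely on this "negative while in
 * vblank, positive afterwards" convention.
 */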

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
			     u32 pm_iir, int new_delay)
{
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (new_delay >= dev_priv->rps.max_delay) {
			/* Mask UP THRESHOLD Interrupts */
			I915_WRITE(GEN6_PMINTRMSK,
				   I915_READ(GEN6_PMINTRMSK) |
				   GEN6_PM_RP_UP_THRESHOLD);
			dev_priv->rps.rp_up_masked = true;
		}
		if (dev_priv->rps.rp_down_masked) {
			/* UnMask DOWN THRESHOLD Interrupts */
			I915_WRITE(GEN6_PMINTRMSK,
				   I915_READ(GEN6_PMINTRMSK) &
				   ~GEN6_PM_RP_DOWN_THRESHOLD);
			dev_priv->rps.rp_down_masked = false;
		}
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (new_delay <= dev_priv->rps.min_delay) {
			/* Mask DOWN THRESHOLD Interrupts */
			I915_WRITE(GEN6_PMINTRMSK,
				   I915_READ(GEN6_PMINTRMSK) |
				   GEN6_PM_RP_DOWN_THRESHOLD);
			dev_priv->rps.rp_down_masked = true;
		}

		if (dev_priv->rps.rp_up_masked) {
			/* UnMask UP THRESHOLD Interrupts */
			I915_WRITE(GEN6_PMINTRMSK,
				   I915_READ(GEN6_PMINTRMSK) &
				   ~GEN6_PM_RP_UP_THRESHOLD);
			dev_priv->rps.rp_up_masked = false;
		}
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);

	gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
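
/*
 * Sketch of the frequency stepping behaviour implemented above (an
 * illustration, not extra driver logic): last_adj makes consecutive
 * interrupts in the same direction accelerate. Three up-threshold
 * interrupts in a row step the requested delay by +1, +2 and +4, a
 * down-timeout snaps back to rpe_delay (or min_delay if already at or
 * below it) and resets the step to 0, and the final value is always
 * clamped to [min_delay, max_delay] before being handed to
 * gen6_set_rps()/valleyview_set_rps().
 */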
1168
Ben Widawskye3689192012-05-25 16:56:22 -07001169
1170/**
1171 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1172 * occurred.
1173 * @work: workqueue struct
1174 *
1175 * Doesn't actually do anything except notify userspace. As a consequence of
1176 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
1178 */
1179static void ivybridge_parity_work(struct work_struct *work)
1180{
1181 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001182 l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001183 u32 error_status, row, bank, subbank;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001184 char *parity_event[6];
Ben Widawskye3689192012-05-25 16:56:22 -07001185 uint32_t misccpctl;
1186 unsigned long flags;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001187 uint8_t slice = 0;
Ben Widawskye3689192012-05-25 16:56:22 -07001188
1189 /* We must turn off DOP level clock gating to access the L3 registers.
1190 * In order to prevent a get/put style interface, acquire struct mutex
1191 * any time we access those registers.
1192 */
1193 mutex_lock(&dev_priv->dev->struct_mutex);
1194
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001195 /* If we've screwed up tracking, just let the interrupt fire again */
1196 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1197 goto out;
1198
Ben Widawskye3689192012-05-25 16:56:22 -07001199 misccpctl = I915_READ(GEN7_MISCCPCTL);
1200 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1201 POSTING_READ(GEN7_MISCCPCTL);
1202
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001203 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1204 u32 reg;
Ben Widawskye3689192012-05-25 16:56:22 -07001205
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001206 slice--;
1207 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1208 break;
1209
1210 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1211
1212 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1213
1214 error_status = I915_READ(reg);
1215 row = GEN7_PARITY_ERROR_ROW(error_status);
1216 bank = GEN7_PARITY_ERROR_BANK(error_status);
1217 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1218
1219 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1220 POSTING_READ(reg);
1221
1222 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1223 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1224 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1225 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1226 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1227 parity_event[5] = NULL;
1228
Dave Airlie5bdebb12013-10-11 14:07:25 +10001229 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001230 KOBJ_CHANGE, parity_event);
1231
1232 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1233 slice, row, bank, subbank);
1234
1235 kfree(parity_event[4]);
1236 kfree(parity_event[3]);
1237 kfree(parity_event[2]);
1238 kfree(parity_event[1]);
1239 }
Ben Widawskye3689192012-05-25 16:56:22 -07001240
1241 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1242
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001243out:
1244 WARN_ON(dev_priv->l3_parity.which_slice);
Ben Widawskye3689192012-05-25 16:56:22 -07001245 spin_lock_irqsave(&dev_priv->irq_lock, flags);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001246 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
Ben Widawskye3689192012-05-25 16:56:22 -07001247 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1248
1249 mutex_unlock(&dev_priv->dev->struct_mutex);
Ben Widawskye3689192012-05-25 16:56:22 -07001250}
1251
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001252static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
Ben Widawskye3689192012-05-25 16:56:22 -07001253{
1254 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Ben Widawskye3689192012-05-25 16:56:22 -07001255
Ben Widawsky040d2ba2013-09-19 11:01:40 -07001256 if (!HAS_L3_DPF(dev))
Ben Widawskye3689192012-05-25 16:56:22 -07001257 return;
1258
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001259 spin_lock(&dev_priv->irq_lock);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001260 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001261 spin_unlock(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001262
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001263 iir &= GT_PARITY_ERROR(dev);
1264 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1265 dev_priv->l3_parity.which_slice |= 1 << 1;
1266
1267 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1268 dev_priv->l3_parity.which_slice |= 1 << 0;
1269
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001270 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001271}
1272
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001273static void ilk_gt_irq_handler(struct drm_device *dev,
1274 struct drm_i915_private *dev_priv,
1275 u32 gt_iir)
1276{
1277 if (gt_iir &
1278 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1279 notify_ring(dev, &dev_priv->ring[RCS]);
1280 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1281 notify_ring(dev, &dev_priv->ring[VCS]);
1282}
1283
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001284static void snb_gt_irq_handler(struct drm_device *dev,
1285 struct drm_i915_private *dev_priv,
1286 u32 gt_iir)
1287{
1288
Ben Widawskycc609d52013-05-28 19:22:29 -07001289 if (gt_iir &
1290 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001291 notify_ring(dev, &dev_priv->ring[RCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001292 if (gt_iir & GT_BSD_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001293 notify_ring(dev, &dev_priv->ring[VCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001294 if (gt_iir & GT_BLT_USER_INTERRUPT)
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001295 notify_ring(dev, &dev_priv->ring[BCS]);
1296
Ben Widawskycc609d52013-05-28 19:22:29 -07001297 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1298 GT_BSD_CS_ERROR_INTERRUPT |
1299 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
Mika Kuoppala58174462014-02-25 17:11:26 +02001300 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1301 gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001302 }
Ben Widawskye3689192012-05-25 16:56:22 -07001303
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001304 if (gt_iir & GT_PARITY_ERROR(dev))
1305 ivybridge_parity_error_irq_handler(dev, gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001306}
1307
Ben Widawskyabd58f02013-11-02 21:07:09 -07001308static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1309 struct drm_i915_private *dev_priv,
1310 u32 master_ctl)
1311{
1312 u32 rcs, bcs, vcs;
1313 uint32_t tmp = 0;
1314 irqreturn_t ret = IRQ_NONE;
1315
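	/*
	 * Each GT IIR register packs the interrupt bits of one or two
	 * engines into separate bit lanes, so the raw value is shifted down
	 * per engine and then tested against the common user interrupt bit
	 * (the BCS lane reuses the GT_RENDER_USER_INTERRUPT bit position
	 * after the shift).
	 */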
1316 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1317 tmp = I915_READ(GEN8_GT_IIR(0));
1318 if (tmp) {
1319 ret = IRQ_HANDLED;
1320 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1321 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1322 if (rcs & GT_RENDER_USER_INTERRUPT)
1323 notify_ring(dev, &dev_priv->ring[RCS]);
1324 if (bcs & GT_RENDER_USER_INTERRUPT)
1325 notify_ring(dev, &dev_priv->ring[BCS]);
1326 I915_WRITE(GEN8_GT_IIR(0), tmp);
1327 } else
1328 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1329 }
1330
1331 if (master_ctl & GEN8_GT_VCS1_IRQ) {
1332 tmp = I915_READ(GEN8_GT_IIR(1));
1333 if (tmp) {
1334 ret = IRQ_HANDLED;
1335 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1336 if (vcs & GT_RENDER_USER_INTERRUPT)
1337 notify_ring(dev, &dev_priv->ring[VCS]);
1338 I915_WRITE(GEN8_GT_IIR(1), tmp);
1339 } else
1340 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1341 }
1342
1343 if (master_ctl & GEN8_GT_VECS_IRQ) {
1344 tmp = I915_READ(GEN8_GT_IIR(3));
1345 if (tmp) {
1346 ret = IRQ_HANDLED;
1347 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1348 if (vcs & GT_RENDER_USER_INTERRUPT)
1349 notify_ring(dev, &dev_priv->ring[VECS]);
1350 I915_WRITE(GEN8_GT_IIR(3), tmp);
1351 } else
1352 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1353 }
1354
1355 return ret;
1356}
1357
Egbert Eichb543fb02013-04-16 13:36:54 +02001358#define HPD_STORM_DETECT_PERIOD 1000
1359#define HPD_STORM_THRESHOLD 5
1360
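/*
 * If more than HPD_STORM_THRESHOLD interrupts arrive on a single pin within
 * HPD_STORM_DETECT_PERIOD msecs, the pin is treated as storming: it is
 * marked HPD_MARK_DISABLED and the platform's hpd_irq_setup() hook is re-run
 * so the pin stops raising hotplug interrupts.
 */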
Daniel Vetter10a504d2013-06-27 17:52:12 +02001361static inline void intel_hpd_irq_handler(struct drm_device *dev,
Daniel Vetter22062db2013-06-27 17:52:11 +02001362 u32 hotplug_trigger,
1363 const u32 *hpd)
Egbert Eichb543fb02013-04-16 13:36:54 +02001364{
1365 drm_i915_private_t *dev_priv = dev->dev_private;
Egbert Eichb543fb02013-04-16 13:36:54 +02001366 int i;
Daniel Vetter10a504d2013-06-27 17:52:12 +02001367 bool storm_detected = false;
Egbert Eichb543fb02013-04-16 13:36:54 +02001368
Daniel Vetter91d131d2013-06-27 17:52:14 +02001369 if (!hotplug_trigger)
1370 return;
1371
Imre Deakcc9bd492014-01-16 19:56:54 +02001372 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1373 hotplug_trigger);
1374
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001375 spin_lock(&dev_priv->irq_lock);
Egbert Eichb543fb02013-04-16 13:36:54 +02001376 for (i = 1; i < HPD_NUM_PINS; i++) {
Egbert Eich821450c2013-04-16 13:36:55 +02001377
Chris Wilson34320872014-01-10 18:49:20 +00001378 WARN_ONCE(hpd[i] & hotplug_trigger &&
Chris Wilson8b5565b2014-01-10 18:49:21 +00001379 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
Chris Wilsoncba1c072014-01-10 20:17:07 +00001380 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1381 hotplug_trigger, i, hpd[i]);
Egbert Eichb8f102e2013-07-26 14:14:24 +02001382
Egbert Eichb543fb02013-04-16 13:36:54 +02001383 if (!(hpd[i] & hotplug_trigger) ||
1384 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1385 continue;
1386
Jani Nikulabc5ead8c2013-05-07 15:10:29 +03001387 dev_priv->hpd_event_bits |= (1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001388 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1389 dev_priv->hpd_stats[i].hpd_last_jiffies
1390 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1391 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1392 dev_priv->hpd_stats[i].hpd_cnt = 0;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001393 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001394 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1395 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
Egbert Eich142e2392013-04-11 15:57:57 +02001396 dev_priv->hpd_event_bits &= ~(1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001397 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
Daniel Vetter10a504d2013-06-27 17:52:12 +02001398 storm_detected = true;
Egbert Eichb543fb02013-04-16 13:36:54 +02001399 } else {
1400 dev_priv->hpd_stats[i].hpd_cnt++;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001401 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1402 dev_priv->hpd_stats[i].hpd_cnt);
Egbert Eichb543fb02013-04-16 13:36:54 +02001403 }
1404 }
1405
Daniel Vetter10a504d2013-06-27 17:52:12 +02001406 if (storm_detected)
1407 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001408 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter5876fa02013-06-27 17:52:13 +02001409
Daniel Vetter645416f2013-09-02 16:22:25 +02001410 /*
1411 * Our hotplug handler can grab modeset locks (by calling down into the
1412 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1413 * queue for otherwise the flush_work in the pageflip code will
1414 * deadlock.
1415 */
1416 schedule_work(&dev_priv->hotplug_work);
Egbert Eichb543fb02013-04-16 13:36:54 +02001417}
1418
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001419static void gmbus_irq_handler(struct drm_device *dev)
1420{
Daniel Vetter28c70f12012-12-01 13:53:45 +01001421 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1422
Daniel Vetter28c70f12012-12-01 13:53:45 +01001423 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001424}
1425
Daniel Vetterce99c252012-12-01 13:53:47 +01001426static void dp_aux_irq_handler(struct drm_device *dev)
1427{
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001428 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1429
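	/*
	 * DP AUX transfers wait on the same queue as GMBUS transfers, so an
	 * AUX-done interrupt is signalled by waking gmbus_wait_queue here.
	 */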
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001430 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetterce99c252012-12-01 13:53:47 +01001431}
1432
Shuang He8bf1e9f2013-10-15 18:55:27 +01001433#if defined(CONFIG_DEBUG_FS)
Daniel Vetter277de952013-10-18 16:37:07 +02001434static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1435 uint32_t crc0, uint32_t crc1,
1436 uint32_t crc2, uint32_t crc3,
1437 uint32_t crc4)
Shuang He8bf1e9f2013-10-15 18:55:27 +01001438{
1439 struct drm_i915_private *dev_priv = dev->dev_private;
1440 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1441 struct intel_pipe_crc_entry *entry;
Damien Lespiauac2300d2013-10-15 18:55:30 +01001442 int head, tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001443
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001444 spin_lock(&pipe_crc->lock);
1445
Damien Lespiau0c912c72013-10-15 18:55:37 +01001446 if (!pipe_crc->entries) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001447 spin_unlock(&pipe_crc->lock);
Damien Lespiau0c912c72013-10-15 18:55:37 +01001448 DRM_ERROR("spurious interrupt\n");
1449 return;
1450 }
1451
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001452 head = pipe_crc->head;
1453 tail = pipe_crc->tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001454
1455 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001456 spin_unlock(&pipe_crc->lock);
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001457 DRM_ERROR("CRC buffer overflowing\n");
1458 return;
1459 }
1460
1461 entry = &pipe_crc->entries[head];
Shuang He8bf1e9f2013-10-15 18:55:27 +01001462
Daniel Vetter8bc5e952013-10-16 22:55:49 +02001463 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
Daniel Vettereba94eb2013-10-16 22:55:46 +02001464 entry->crc[0] = crc0;
1465 entry->crc[1] = crc1;
1466 entry->crc[2] = crc2;
1467 entry->crc[3] = crc3;
1468 entry->crc[4] = crc4;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001469
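	/*
	 * Advance the head index; the AND-based wrap-around relies on
	 * INTEL_PIPE_CRC_ENTRIES_NR being a power of two.
	 */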
1470 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001471 pipe_crc->head = head;
1472
1473 spin_unlock(&pipe_crc->lock);
Damien Lespiau07144422013-10-15 18:55:40 +01001474
1475 wake_up_interruptible(&pipe_crc->wq);
Shuang He8bf1e9f2013-10-15 18:55:27 +01001476}
Daniel Vetter277de952013-10-18 16:37:07 +02001477#else
1478static inline void
1479display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1480 uint32_t crc0, uint32_t crc1,
1481 uint32_t crc2, uint32_t crc3,
1482 uint32_t crc4) {}
1483#endif
Daniel Vettereba94eb2013-10-16 22:55:46 +02001484
Daniel Vetter277de952013-10-18 16:37:07 +02001485
1486static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5a69b892013-10-16 22:55:52 +02001487{
1488 struct drm_i915_private *dev_priv = dev->dev_private;
1489
Daniel Vetter277de952013-10-18 16:37:07 +02001490 display_pipe_crc_irq_handler(dev, pipe,
1491 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1492 0, 0, 0, 0);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001493}
1494
Daniel Vetter277de952013-10-18 16:37:07 +02001495static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vettereba94eb2013-10-16 22:55:46 +02001496{
1497 struct drm_i915_private *dev_priv = dev->dev_private;
1498
Daniel Vetter277de952013-10-18 16:37:07 +02001499 display_pipe_crc_irq_handler(dev, pipe,
1500 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1501 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1502 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1503 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1504 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
Daniel Vettereba94eb2013-10-16 22:55:46 +02001505}
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001506
Daniel Vetter277de952013-10-18 16:37:07 +02001507static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001508{
1509 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001510 uint32_t res1, res2;
1511
1512 if (INTEL_INFO(dev)->gen >= 3)
1513 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1514 else
1515 res1 = 0;
1516
1517 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1518 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1519 else
1520 res2 = 0;
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001521
Daniel Vetter277de952013-10-18 16:37:07 +02001522 display_pipe_crc_irq_handler(dev, pipe,
1523 I915_READ(PIPE_CRC_RES_RED(pipe)),
1524 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1525 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1526 res1, res2);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001527}
Shuang He8bf1e9f2013-10-15 18:55:27 +01001528
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001529/* The RPS events need forcewake, so we add them to a work queue and mask their
1530 * IMR bits until the work is done. Other interrupts can be processed without
1531 * the work queue. */
1532static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
Ben Widawskybaf02a12013-05-28 19:22:24 -07001533{
Daniel Vetter41a05a32013-07-04 23:35:26 +02001534 if (pm_iir & GEN6_PM_RPS_EVENTS) {
Daniel Vetter59cdb632013-07-04 23:35:28 +02001535 spin_lock(&dev_priv->irq_lock);
Daniel Vetter41a05a32013-07-04 23:35:26 +02001536 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
Paulo Zanoni4d3b3d52013-08-09 17:04:36 -03001537 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
Daniel Vetter59cdb632013-07-04 23:35:28 +02001538 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter2adbee62013-07-04 23:35:27 +02001539
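		/*
		 * rps.work runs with forcewake held and is expected to
		 * unmask these RPS interrupt bits again once it is done.
		 */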
1540 queue_work(dev_priv->wq, &dev_priv->rps.work);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001541 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001542
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001543 if (HAS_VEBOX(dev_priv->dev)) {
1544 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1545 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
Ben Widawsky12638c52013-05-28 19:22:31 -07001546
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001547 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
Mika Kuoppala58174462014-02-25 17:11:26 +02001548 i915_handle_error(dev_priv->dev, false,
1549 "VEBOX CS error interrupt 0x%08x",
1550 pm_iir);
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001551 }
Ben Widawsky12638c52013-05-28 19:22:31 -07001552 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001553}
1554
Imre Deakc1874ed2014-02-04 21:35:46 +02001555static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1556{
1557 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak91d181d2014-02-10 18:42:49 +02001558 u32 pipe_stats[I915_MAX_PIPES] = { };
Imre Deakc1874ed2014-02-04 21:35:46 +02001559 int pipe;
1560
Imre Deak58ead0d2014-02-04 21:35:47 +02001561 spin_lock(&dev_priv->irq_lock);
Imre Deakc1874ed2014-02-04 21:35:46 +02001562 for_each_pipe(pipe) {
Imre Deak91d181d2014-02-10 18:42:49 +02001563 int reg;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001564 u32 mask, iir_bit = 0;
Imre Deak91d181d2014-02-10 18:42:49 +02001565
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001566 /*
1567 * PIPESTAT bits get signalled even when the interrupt is
1568 * disabled with the mask bits, and some of the status bits do
1569 * not generate interrupts at all (like the underrun bit). Hence
1570 * we need to be careful that we only handle what we want to
1571 * handle.
1572 */
1573 mask = 0;
1574 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
1575 mask |= PIPE_FIFO_UNDERRUN_STATUS;
1576
1577 switch (pipe) {
1578 case PIPE_A:
1579 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1580 break;
1581 case PIPE_B:
1582 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1583 break;
1584 }
1585 if (iir & iir_bit)
1586 mask |= dev_priv->pipestat_irq_mask[pipe];
1587
1588 if (!mask)
Imre Deak91d181d2014-02-10 18:42:49 +02001589 continue;
1590
1591 reg = PIPESTAT(pipe);
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001592 mask |= PIPESTAT_INT_ENABLE_MASK;
1593 pipe_stats[pipe] = I915_READ(reg) & mask;
Imre Deakc1874ed2014-02-04 21:35:46 +02001594
1595 /*
1596 * Clear the PIPE*STAT regs before the IIR
1597 */
Imre Deak91d181d2014-02-10 18:42:49 +02001598 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1599 PIPESTAT_INT_STATUS_MASK))
Imre Deakc1874ed2014-02-04 21:35:46 +02001600 I915_WRITE(reg, pipe_stats[pipe]);
1601 }
Imre Deak58ead0d2014-02-04 21:35:47 +02001602 spin_unlock(&dev_priv->irq_lock);
Imre Deakc1874ed2014-02-04 21:35:46 +02001603
1604 for_each_pipe(pipe) {
1605 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1606 drm_handle_vblank(dev, pipe);
1607
Imre Deak579a9b02014-02-04 21:35:48 +02001608 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
Imre Deakc1874ed2014-02-04 21:35:46 +02001609 intel_prepare_page_flip(dev, pipe);
1610 intel_finish_page_flip(dev, pipe);
1611 }
1612
1613 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1614 i9xx_pipe_crc_irq_handler(dev, pipe);
1615
1616 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
1617 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1618 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
1619 }
1620
1621 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1622 gmbus_irq_handler(dev);
1623}
1624
Daniel Vetterff1f5252012-10-02 15:10:55 +02001625static irqreturn_t valleyview_irq_handler(int irq, void *arg)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001626{
1627 struct drm_device *dev = (struct drm_device *) arg;
1628 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1629 u32 iir, gt_iir, pm_iir;
1630 irqreturn_t ret = IRQ_NONE;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001631
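	/*
	 * Keep looping until all three IIR registers read back zero, so
	 * interrupts that arrive while the handler is running are still
	 * picked up before returning.
	 */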
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001632 while (true) {
1633 iir = I915_READ(VLV_IIR);
1634 gt_iir = I915_READ(GTIIR);
1635 pm_iir = I915_READ(GEN6_PMIIR);
1636
1637 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1638 goto out;
1639
1640 ret = IRQ_HANDLED;
1641
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001642 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001643
Imre Deakc1874ed2014-02-04 21:35:46 +02001644 valleyview_pipestat_irq_handler(dev, iir);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001645
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001646 /* Consume port. Then clear IIR or we'll miss events */
1647 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1648 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02001649 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001650
Daniel Vetter91d131d2013-06-27 17:52:14 +02001651 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1652
Daniel Vetter4aeebd72013-10-31 09:53:36 +01001653 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1654 dp_aux_irq_handler(dev);
1655
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001656 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1657 I915_READ(PORT_HOTPLUG_STAT);
1658 }
1659
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001660
Paulo Zanoni60611c12013-08-15 11:50:01 -03001661 if (pm_iir)
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001662 gen6_rps_irq_handler(dev_priv, pm_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001663
1664 I915_WRITE(GTIIR, gt_iir);
1665 I915_WRITE(GEN6_PMIIR, pm_iir);
1666 I915_WRITE(VLV_IIR, iir);
1667 }
1668
1669out:
1670 return ret;
1671}
1672
Adam Jackson23e81d62012-06-06 15:45:44 -04001673static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08001674{
1675 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001676 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001677 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Jesse Barnes776ad802011-01-04 15:09:39 -08001678
Daniel Vetter91d131d2013-06-27 17:52:14 +02001679 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1680
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001681 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1682 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1683 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08001684 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001685 port_name(port));
1686 }
Jesse Barnes776ad802011-01-04 15:09:39 -08001687
Daniel Vetterce99c252012-12-01 13:53:47 +01001688 if (pch_iir & SDE_AUX_MASK)
1689 dp_aux_irq_handler(dev);
1690
Jesse Barnes776ad802011-01-04 15:09:39 -08001691 if (pch_iir & SDE_GMBUS)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001692 gmbus_irq_handler(dev);
Jesse Barnes776ad802011-01-04 15:09:39 -08001693
1694 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1695 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1696
1697 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1698 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1699
1700 if (pch_iir & SDE_POISON)
1701 DRM_ERROR("PCH poison interrupt\n");
1702
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001703 if (pch_iir & SDE_FDI_MASK)
1704 for_each_pipe(pipe)
1705 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1706 pipe_name(pipe),
1707 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08001708
1709 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1710 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1711
1712 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1713 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1714
Jesse Barnes776ad802011-01-04 15:09:39 -08001715 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Paulo Zanoni86642812013-04-12 17:57:57 -03001716 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1717 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001718 DRM_ERROR("PCH transcoder A FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001719
1720 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1721 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1722 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001723 DRM_ERROR("PCH transcoder B FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001724}
1725
1726static void ivb_err_int_handler(struct drm_device *dev)
1727{
1728 struct drm_i915_private *dev_priv = dev->dev_private;
1729 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001730 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03001731
Paulo Zanonide032bf2013-04-12 17:57:58 -03001732 if (err_int & ERR_INT_POISON)
1733 DRM_ERROR("Poison interrupt\n");
1734
Daniel Vetter5a69b892013-10-16 22:55:52 +02001735 for_each_pipe(pipe) {
1736 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1737 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1738 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001739 DRM_ERROR("Pipe %c FIFO underrun\n",
1740 pipe_name(pipe));
Daniel Vetter5a69b892013-10-16 22:55:52 +02001741 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001742
Daniel Vetter5a69b892013-10-16 22:55:52 +02001743 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1744 if (IS_IVYBRIDGE(dev))
Daniel Vetter277de952013-10-18 16:37:07 +02001745 ivb_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001746 else
Daniel Vetter277de952013-10-18 16:37:07 +02001747 hsw_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001748 }
1749 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01001750
Paulo Zanoni86642812013-04-12 17:57:57 -03001751 I915_WRITE(GEN7_ERR_INT, err_int);
1752}
1753
1754static void cpt_serr_int_handler(struct drm_device *dev)
1755{
1756 struct drm_i915_private *dev_priv = dev->dev_private;
1757 u32 serr_int = I915_READ(SERR_INT);
1758
Paulo Zanonide032bf2013-04-12 17:57:58 -03001759 if (serr_int & SERR_INT_POISON)
1760 DRM_ERROR("PCH poison interrupt\n");
1761
Paulo Zanoni86642812013-04-12 17:57:57 -03001762 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1763 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1764 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001765 DRM_ERROR("PCH transcoder A FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001766
1767 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1768 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1769 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001770 DRM_ERROR("PCH transcoder B FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001771
1772 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1773 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1774 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001775 DRM_ERROR("PCH transcoder C FIFO underrun\n");
Paulo Zanoni86642812013-04-12 17:57:57 -03001776
1777 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08001778}
1779
Adam Jackson23e81d62012-06-06 15:45:44 -04001780static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1781{
1782 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1783 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001784 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04001785
Daniel Vetter91d131d2013-06-27 17:52:14 +02001786 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1787
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001788 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1789 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1790 SDE_AUDIO_POWER_SHIFT_CPT);
1791 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1792 port_name(port));
1793 }
Adam Jackson23e81d62012-06-06 15:45:44 -04001794
1795 if (pch_iir & SDE_AUX_MASK_CPT)
Daniel Vetterce99c252012-12-01 13:53:47 +01001796 dp_aux_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001797
1798 if (pch_iir & SDE_GMBUS_CPT)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001799 gmbus_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001800
1801 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1802 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1803
1804 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1805 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1806
1807 if (pch_iir & SDE_FDI_MASK_CPT)
1808 for_each_pipe(pipe)
1809 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1810 pipe_name(pipe),
1811 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03001812
1813 if (pch_iir & SDE_ERROR_CPT)
1814 cpt_serr_int_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001815}
1816
Paulo Zanonic008bc62013-07-12 16:35:10 -03001817static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1818{
1819 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter40da17c2013-10-21 18:04:36 +02001820 enum pipe pipe;
Paulo Zanonic008bc62013-07-12 16:35:10 -03001821
1822 if (de_iir & DE_AUX_CHANNEL_A)
1823 dp_aux_irq_handler(dev);
1824
1825 if (de_iir & DE_GSE)
1826 intel_opregion_asle_intr(dev);
1827
Paulo Zanonic008bc62013-07-12 16:35:10 -03001828 if (de_iir & DE_POISON)
1829 DRM_ERROR("Poison interrupt\n");
1830
Daniel Vetter40da17c2013-10-21 18:04:36 +02001831 for_each_pipe(pipe) {
1832 if (de_iir & DE_PIPE_VBLANK(pipe))
1833 drm_handle_vblank(dev, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03001834
Daniel Vetter40da17c2013-10-21 18:04:36 +02001835 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1836 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02001837 DRM_ERROR("Pipe %c FIFO underrun\n",
1838 pipe_name(pipe));
Paulo Zanonic008bc62013-07-12 16:35:10 -03001839
Daniel Vetter40da17c2013-10-21 18:04:36 +02001840 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1841 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001842
Daniel Vetter40da17c2013-10-21 18:04:36 +02001843 /* plane/pipes map 1:1 on ilk+ */
1844 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1845 intel_prepare_page_flip(dev, pipe);
1846 intel_finish_page_flip_plane(dev, pipe);
1847 }
Paulo Zanonic008bc62013-07-12 16:35:10 -03001848 }
1849
1850 /* check event from PCH */
1851 if (de_iir & DE_PCH_EVENT) {
1852 u32 pch_iir = I915_READ(SDEIIR);
1853
1854 if (HAS_PCH_CPT(dev))
1855 cpt_irq_handler(dev, pch_iir);
1856 else
1857 ibx_irq_handler(dev, pch_iir);
1858
1859 /* should clear PCH hotplug event before clearing the CPU irq */
1860 I915_WRITE(SDEIIR, pch_iir);
1861 }
1862
1863 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1864 ironlake_rps_change_irq_handler(dev);
1865}
1866
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001867static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1868{
1869 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02001870 enum pipe i;
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001871
1872 if (de_iir & DE_ERR_INT_IVB)
1873 ivb_err_int_handler(dev);
1874
1875 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1876 dp_aux_irq_handler(dev);
1877
1878 if (de_iir & DE_GSE_IVB)
1879 intel_opregion_asle_intr(dev);
1880
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02001881 for_each_pipe(i) {
Daniel Vetter40da17c2013-10-21 18:04:36 +02001882 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001883 drm_handle_vblank(dev, i);
Daniel Vetter40da17c2013-10-21 18:04:36 +02001884
1885 /* plane/pipes map 1:1 on ilk+ */
1886 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001887 intel_prepare_page_flip(dev, i);
1888 intel_finish_page_flip_plane(dev, i);
1889 }
1890 }
1891
1892 /* check event from PCH */
1893 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1894 u32 pch_iir = I915_READ(SDEIIR);
1895
1896 cpt_irq_handler(dev, pch_iir);
1897
1898 /* clear PCH hotplug event before clearing the CPU irq */
1899 I915_WRITE(SDEIIR, pch_iir);
1900 }
1901}
1902
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001903static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001904{
1905 struct drm_device *dev = (struct drm_device *) arg;
1906 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001907 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01001908 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001909
Paulo Zanoni86642812013-04-12 17:57:57 -03001910 /* We get interrupts on unclaimed registers, so check for this before we
1911 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01001912 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03001913
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001914 /* disable master interrupt before clearing iir */
1915 de_ier = I915_READ(DEIER);
1916 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03001917 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01001918
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001919 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1920 * interrupts will be stored on its back queue, and then we'll be
1921 * able to process them after we restore SDEIER (as soon as we restore
1922 * it, we'll get an interrupt if SDEIIR still has something to process
1923 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07001924 if (!HAS_PCH_NOP(dev)) {
1925 sde_ier = I915_READ(SDEIER);
1926 I915_WRITE(SDEIER, 0);
1927 POSTING_READ(SDEIER);
1928 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001929
Chris Wilson0e434062012-05-09 21:45:44 +01001930 gt_iir = I915_READ(GTIIR);
1931 if (gt_iir) {
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001932 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001933 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001934 else
1935 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001936 I915_WRITE(GTIIR, gt_iir);
1937 ret = IRQ_HANDLED;
1938 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001939
1940 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01001941 if (de_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001942 if (INTEL_INFO(dev)->gen >= 7)
1943 ivb_display_irq_handler(dev, de_iir);
1944 else
1945 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001946 I915_WRITE(DEIIR, de_iir);
1947 ret = IRQ_HANDLED;
1948 }
1949
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001950 if (INTEL_INFO(dev)->gen >= 6) {
1951 u32 pm_iir = I915_READ(GEN6_PMIIR);
1952 if (pm_iir) {
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001953 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001954 I915_WRITE(GEN6_PMIIR, pm_iir);
1955 ret = IRQ_HANDLED;
1956 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001957 }
1958
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001959 I915_WRITE(DEIER, de_ier);
1960 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07001961 if (!HAS_PCH_NOP(dev)) {
1962 I915_WRITE(SDEIER, sde_ier);
1963 POSTING_READ(SDEIER);
1964 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001965
1966 return ret;
1967}
1968
Ben Widawskyabd58f02013-11-02 21:07:09 -07001969static irqreturn_t gen8_irq_handler(int irq, void *arg)
1970{
1971 struct drm_device *dev = arg;
1972 struct drm_i915_private *dev_priv = dev->dev_private;
1973 u32 master_ctl;
1974 irqreturn_t ret = IRQ_NONE;
1975 uint32_t tmp = 0;
Daniel Vetterc42664c2013-11-07 11:05:40 +01001976 enum pipe pipe;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001977
Ben Widawskyabd58f02013-11-02 21:07:09 -07001978 master_ctl = I915_READ(GEN8_MASTER_IRQ);
1979 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1980 if (!master_ctl)
1981 return IRQ_NONE;
1982
1983 I915_WRITE(GEN8_MASTER_IRQ, 0);
1984 POSTING_READ(GEN8_MASTER_IRQ);
1985
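	/*
	 * The master interrupt was disabled by the write of 0 above; it is
	 * re-armed with GEN8_MASTER_IRQ_CONTROL at the end of this handler.
	 */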
1986 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1987
1988 if (master_ctl & GEN8_DE_MISC_IRQ) {
1989 tmp = I915_READ(GEN8_DE_MISC_IIR);
1990 if (tmp & GEN8_DE_MISC_GSE)
1991 intel_opregion_asle_intr(dev);
1992 else if (tmp)
1993 DRM_ERROR("Unexpected DE Misc interrupt\n");
1994 else
1995 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
1996
1997 if (tmp) {
1998 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
1999 ret = IRQ_HANDLED;
2000 }
2001 }
2002
Daniel Vetter6d766f02013-11-07 14:49:55 +01002003 if (master_ctl & GEN8_DE_PORT_IRQ) {
2004 tmp = I915_READ(GEN8_DE_PORT_IIR);
2005 if (tmp & GEN8_AUX_CHANNEL_A)
2006 dp_aux_irq_handler(dev);
2007 else if (tmp)
2008 DRM_ERROR("Unexpected DE Port interrupt\n");
2009 else
2010 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2011
2012 if (tmp) {
2013 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2014 ret = IRQ_HANDLED;
2015 }
2016 }
2017
Daniel Vetterc42664c2013-11-07 11:05:40 +01002018 for_each_pipe(pipe) {
2019 uint32_t pipe_iir;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002020
Daniel Vetterc42664c2013-11-07 11:05:40 +01002021 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2022 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002023
Daniel Vetterc42664c2013-11-07 11:05:40 +01002024 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2025 if (pipe_iir & GEN8_PIPE_VBLANK)
2026 drm_handle_vblank(dev, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002027
Daniel Vetterc42664c2013-11-07 11:05:40 +01002028 if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
2029 intel_prepare_page_flip(dev, pipe);
2030 intel_finish_page_flip_plane(dev, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002031 }
Daniel Vetterc42664c2013-11-07 11:05:40 +01002032
Daniel Vetter0fbe7872013-11-07 11:05:44 +01002033 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2034 hsw_pipe_crc_irq_handler(dev, pipe);
2035
Daniel Vetter38d83c962013-11-07 11:05:46 +01002036 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2037 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2038 false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02002039 DRM_ERROR("Pipe %c FIFO underrun\n",
2040 pipe_name(pipe));
Daniel Vetter38d83c962013-11-07 11:05:46 +01002041 }
2042
Daniel Vetter30100f22013-11-07 14:49:24 +01002043 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2044 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2045 pipe_name(pipe),
2046 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2047 }
Daniel Vetterc42664c2013-11-07 11:05:40 +01002048
2049 if (pipe_iir) {
2050 ret = IRQ_HANDLED;
2051 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2052 } else
Ben Widawskyabd58f02013-11-02 21:07:09 -07002053 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2054 }
2055
Daniel Vetter92d03a82013-11-07 11:05:43 +01002056 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2057 /*
2058 * FIXME(BDW): Assume for now that the new interrupt handling
2059 * scheme also closed the SDE interrupt handling race we've seen
2060 * on older pch-split platforms. But this needs testing.
2061 */
2062 u32 pch_iir = I915_READ(SDEIIR);
2063
2064 cpt_irq_handler(dev, pch_iir);
2065
2066 if (pch_iir) {
2067 I915_WRITE(SDEIIR, pch_iir);
2068 ret = IRQ_HANDLED;
2069 }
2070 }
2071
Ben Widawskyabd58f02013-11-02 21:07:09 -07002072 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2073 POSTING_READ(GEN8_MASTER_IRQ);
2074
2075 return ret;
2076}
2077
Daniel Vetter17e1df02013-09-08 21:57:13 +02002078static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2079 bool reset_completed)
2080{
2081 struct intel_ring_buffer *ring;
2082 int i;
2083
2084 /*
2085 * Notify all waiters for GPU completion events that reset state has
2086 * been changed, and that they need to restart their wait after
2087 * checking for potential errors (and bail out to drop locks if there is
2088 * a gpu reset pending so that i915_error_work_func can acquire them).
2089 */
2090
2091 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2092 for_each_ring(ring, dev_priv, i)
2093 wake_up_all(&ring->irq_queue);
2094
2095 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2096 wake_up_all(&dev_priv->pending_flip_queue);
2097
2098 /*
2099 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2100 * reset state is cleared.
2101 */
2102 if (reset_completed)
2103 wake_up_all(&dev_priv->gpu_error.reset_queue);
2104}
2105
Jesse Barnes8a905232009-07-11 16:48:03 -04002106/**
2107 * i915_error_work_func - do process context error handling work
2108 * @work: work struct
2109 *
2110 * Fire an error uevent so userspace can see that a hang or error
2111 * was detected.
2112 */
2113static void i915_error_work_func(struct work_struct *work)
2114{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002115 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2116 work);
2117 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
2118 gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04002119 struct drm_device *dev = dev_priv->dev;
Ben Widawskycce723e2013-07-19 09:16:42 -07002120 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2121 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2122 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetter17e1df02013-09-08 21:57:13 +02002123 int ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04002124
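	/*
	 * Userspace sees I915_ERROR_UEVENT=1 when the hang is detected,
	 * I915_RESET_UEVENT=1 right before the reset is attempted and
	 * I915_ERROR_UEVENT=0 once the reset completed successfully.
	 */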
Dave Airlie5bdebb12013-10-11 14:07:25 +10002125 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002126
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002127 /*
2128 * Note that there's only one work item which does gpu resets, so we
2129 * need not worry about concurrent gpu resets potentially incrementing
2130 * error->reset_counter twice. We only need to take care of another
2131 * racing irq/hangcheck declaring the gpu dead for a second time. A
2132 * quick check for that is good enough: schedule_work ensures the
2133 * correct ordering between hang detection and this work item, and since
2134 * the reset in-progress bit is only ever set by code outside of this
2135 * work we don't need to worry about any other races.
2136 */
2137 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01002138 DRM_DEBUG_DRIVER("resetting chip\n");
Dave Airlie5bdebb12013-10-11 14:07:25 +10002139 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
Daniel Vetter7db0ba22012-12-06 16:23:37 +01002140 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002141
Daniel Vetter17e1df02013-09-08 21:57:13 +02002142 /*
2143 * All state reset _must_ be completed before we update the
2144 * reset counter, for otherwise waiters might miss the reset
2145 * pending state and not properly drop locks, resulting in
2146 * deadlocks with the reset work.
2147 */
Daniel Vetterf69061b2012-12-06 09:01:42 +01002148 ret = i915_reset(dev);
2149
Daniel Vetter17e1df02013-09-08 21:57:13 +02002150 intel_display_handle_reset(dev);
2151
Daniel Vetterf69061b2012-12-06 09:01:42 +01002152 if (ret == 0) {
2153 /*
2154 * After all the gem state is reset, increment the reset
2155 * counter and wake up everyone waiting for the reset to
2156 * complete.
2157 *
2158 * Since unlock operations are a one-sided barrier only,
2159 * we need to insert a barrier here to order any seqno
2160 * updates before the counter increment.
2162 */
2163 smp_mb__before_atomic_inc();
2164 atomic_inc(&dev_priv->gpu_error.reset_counter);
2165
Dave Airlie5bdebb12013-10-11 14:07:25 +10002166 kobject_uevent_env(&dev->primary->kdev->kobj,
Daniel Vetterf69061b2012-12-06 09:01:42 +01002167 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002168 } else {
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02002169 atomic_set_mask(I915_WEDGED, &error->reset_counter);
Ben Gamarif316a422009-09-14 17:48:46 -04002170 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002171
Daniel Vetter17e1df02013-09-08 21:57:13 +02002172 /*
2173 * Note: The wake_up also serves as a memory barrier so that
2174 * waiters see the updated value of the reset counter atomic_t.
2175 */
2176 i915_error_wake_up(dev_priv, true);
Ben Gamarif316a422009-09-14 17:48:46 -04002177 }
Jesse Barnes8a905232009-07-11 16:48:03 -04002178}
2179
Chris Wilson35aed2e2010-05-27 13:18:12 +01002180static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04002181{
2182 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07002183 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04002184 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07002185 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04002186
Chris Wilson35aed2e2010-05-27 13:18:12 +01002187 if (!eir)
2188 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04002189
Joe Perchesa70491c2012-03-18 13:00:11 -07002190 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002191
Ben Widawskybd9854f2012-08-23 15:18:09 -07002192 i915_get_extra_instdone(dev, instdone);
2193
Jesse Barnes8a905232009-07-11 16:48:03 -04002194 if (IS_G4X(dev)) {
2195 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2196 u32 ipeir = I915_READ(IPEIR_I965);
2197
Joe Perchesa70491c2012-03-18 13:00:11 -07002198 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2199 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002200 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2201 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002202 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002203 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002204 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002205 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002206 }
2207 if (eir & GM45_ERROR_PAGE_TABLE) {
2208 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002209 pr_err("page table error\n");
2210 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002211 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002212 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002213 }
2214 }
2215
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002216 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002217 if (eir & I915_ERROR_PAGE_TABLE) {
2218 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002219 pr_err("page table error\n");
2220 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002221 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002222 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002223 }
2224 }
2225
2226 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002227 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002228 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002229 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002230 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002231 /* pipestat has already been acked */
2232 }
2233 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002234 pr_err("instruction error\n");
2235 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002236 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2237 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002238 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002239 u32 ipeir = I915_READ(IPEIR);
2240
Joe Perchesa70491c2012-03-18 13:00:11 -07002241 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2242 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002243 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002244 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002245 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002246 } else {
2247 u32 ipeir = I915_READ(IPEIR_I965);
2248
Joe Perchesa70491c2012-03-18 13:00:11 -07002249 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2250 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002251 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002252 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002253 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002254 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002255 }
2256 }
2257
2258 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002259 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002260 eir = I915_READ(EIR);
2261 if (eir) {
2262 /*
2263 * some errors might have become stuck,
2264 * mask them.
2265 */
2266 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2267 I915_WRITE(EMR, I915_READ(EMR) | eir);
2268 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2269 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002270}
2271
2272/**
2273 * i915_handle_error - handle an error interrupt
2274 * @dev: drm device
2275 *
2276 * Do some basic checking of register state at error interrupt time and
2277 * dump it to the syslog. Also call i915_capture_error_state() to make
2278 * sure we get a record and make it available in debugfs. Fire a uevent
2279 * so userspace knows something bad happened (should trigger collection
2280 * of a ring dump etc.).
2281 */
Mika Kuoppala58174462014-02-25 17:11:26 +02002282void i915_handle_error(struct drm_device *dev, bool wedged,
2283 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002284{
2285 struct drm_i915_private *dev_priv = dev->dev_private;
Mika Kuoppala58174462014-02-25 17:11:26 +02002286 va_list args;
2287 char error_msg[80];
Chris Wilson35aed2e2010-05-27 13:18:12 +01002288
Mika Kuoppala58174462014-02-25 17:11:26 +02002289 va_start(args, fmt);
2290 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2291 va_end(args);
2292
2293 i915_capture_error_state(dev, wedged, error_msg);
Chris Wilson35aed2e2010-05-27 13:18:12 +01002294 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002295
Ben Gamariba1234d2009-09-14 17:48:47 -04002296 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01002297 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2298 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002299
Ben Gamari11ed50e2009-09-14 17:48:45 -04002300 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002301 * Wakeup waiting processes so that the reset work function
2302 * i915_error_work_func doesn't deadlock trying to grab various
2303 * locks. By bumping the reset counter first, the woken
2304 * processes will see a reset in progress and back off,
2305 * releasing their locks and then wait for the reset completion.
2306 * We must do this for _all_ gpu waiters that might hold locks
2307 * that the reset work needs to acquire.
2308 *
2309 * Note: The wake_up serves as the required memory barrier to
2310 * ensure that the waiters see the updated value of the reset
2311 * counter atomic_t.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002312 */
Daniel Vetter17e1df02013-09-08 21:57:13 +02002313 i915_error_wake_up(dev_priv, false);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002314 }
2315
Daniel Vetter122f46b2013-09-04 17:36:14 +02002316 /*
2317 * Our reset work can grab modeset locks (since it needs to reset the
2318 * state of outstanding pageflips). Hence it must not be run on our own
2319 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2320 * code will deadlock.
2321 */
2322 schedule_work(&dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04002323}
2324
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002325static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002326{
2327 drm_i915_private_t *dev_priv = dev->dev_private;
2328 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2329 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00002330 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002331 struct intel_unpin_work *work;
2332 unsigned long flags;
2333 bool stall_detected;
2334
2335 /* Ignore early vblank irqs */
2336 if (intel_crtc == NULL)
2337 return;
2338
2339 spin_lock_irqsave(&dev->event_lock, flags);
2340 work = intel_crtc->unpin_work;
2341
Chris Wilsone7d841c2012-12-03 11:36:30 +00002342 if (work == NULL ||
2343 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2344 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002345 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2346 spin_unlock_irqrestore(&dev->event_lock, flags);
2347 return;
2348 }
2349
2350 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00002351 obj = work->pending_flip_obj;
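	/*
	 * On gen4+ the surface base register holds just the GGTT offset of
	 * the framebuffer object, while older generations program DSPADDR
	 * with the linear x/y panning offset folded in, hence the two
	 * different comparisons below.
	 */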
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002352 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002353 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07002354 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002355 i915_gem_obj_ggtt_offset(obj);
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002356 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002357 int dspaddr = DSPADDR(intel_crtc->plane);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002358 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
Ville Syrjälä01f2c772011-12-20 00:06:49 +02002359 crtc->y * crtc->fb->pitches[0] +
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002360 crtc->x * crtc->fb->bits_per_pixel/8);
2361 }
2362
2363 spin_unlock_irqrestore(&dev->event_lock, flags);
2364
2365 if (stall_detected) {
2366 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2367 intel_prepare_page_flip(dev, intel_crtc->plane);
2368 }
2369}
2370
Keith Packard42f52ef2008-10-18 19:39:29 -07002371/* Called from drm generic code, passed 'crtc' which
2372 * we use as a pipe index
2373 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002374static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002375{
2376 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002377 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002378
Chris Wilson5eddb702010-09-11 13:48:45 +01002379 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002380 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002381
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002382 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002383 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002384 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002385 PIPE_START_VBLANK_INTERRUPT_STATUS);
Keith Packarde9d21d72008-10-16 11:31:38 -07002386 else
Keith Packard7c463582008-11-04 02:03:27 -08002387 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002388 PIPE_VBLANK_INTERRUPT_STATUS);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002389
2390 /* maintain vblank delivery even in deep C-states */
Damien Lespiau3d13ef22014-02-07 19:12:47 +00002391 if (INTEL_INFO(dev)->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002392 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002393 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002394
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002395 return 0;
2396}
2397
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002398static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002399{
2400 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2401 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002402 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002403 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002404
2405 if (!i915_pipe_enabled(dev, pipe))
2406 return -EINVAL;
2407
2408 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002409 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002410 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2411
2412 return 0;
2413}
2414
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002415static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2416{
2417 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2418 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002419
2420 if (!i915_pipe_enabled(dev, pipe))
2421 return -EINVAL;
2422
2423 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002424 i915_enable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002425 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002426 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2427
2428 return 0;
2429}
2430
Ben Widawskyabd58f02013-11-02 21:07:09 -07002431static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2432{
2433 struct drm_i915_private *dev_priv = dev->dev_private;
2434 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002435
2436 if (!i915_pipe_enabled(dev, pipe))
2437 return -EINVAL;
2438
2439 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002440 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2441 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2442 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002443 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2444 return 0;
2445}
2446
Keith Packard42f52ef2008-10-18 19:39:29 -07002447/* Called from drm generic code, passed 'crtc' which
2448 * we use as a pipe index
2449 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002450static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002451{
2452 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002453 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002454
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002455 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Damien Lespiau3d13ef22014-02-07 19:12:47 +00002456 if (INTEL_INFO(dev)->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002457 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00002458
Jesse Barnesf796cf82011-04-07 13:58:17 -07002459 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002460 PIPE_VBLANK_INTERRUPT_STATUS |
2461 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002462 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2463}
2464
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002465static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002466{
2467 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2468 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002469 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002470 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002471
2472 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002473 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002474 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2475}
2476
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002477static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2478{
2479 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2480 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002481
2482 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002483 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002484 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002485 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2486}
2487
Ben Widawskyabd58f02013-11-02 21:07:09 -07002488static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2489{
2490 struct drm_i915_private *dev_priv = dev->dev_private;
2491 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002492
2493 if (!i915_pipe_enabled(dev, pipe))
2494 return;
2495
2496 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002497 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2498 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2499 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002500 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2501}
2502
Chris Wilson893eead2010-10-27 14:44:35 +01002503static u32
2504ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08002505{
Chris Wilson893eead2010-10-27 14:44:35 +01002506 return list_entry(ring->request_list.prev,
2507 struct drm_i915_gem_request, list)->seqno;
2508}
2509
Chris Wilson9107e9d2013-06-10 11:20:20 +01002510static bool
2511ring_idle(struct intel_ring_buffer *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002512{
Chris Wilson9107e9d2013-06-10 11:20:20 +01002513 return (list_empty(&ring->request_list) ||
2514 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04002515}
2516
Chris Wilson6274f212013-06-10 11:20:21 +01002517static struct intel_ring_buffer *
2518semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002519{
2520 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilson6274f212013-06-10 11:20:21 +01002521 u32 cmd, ipehr, acthd, acthd_min;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002522
2523 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2524 if ((ipehr & ~(0x3 << 16)) !=
2525 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
Chris Wilson6274f212013-06-10 11:20:21 +01002526 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002527
2528 /* ACTHD is likely pointing to the dword after the actual command,
2529 * so scan backwards until we find the MBOX.
2530 */
Chris Wilson6274f212013-06-10 11:20:21 +01002531 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002532 acthd_min = max((int)acthd - 3 * 4, 0);
2533 do {
2534 cmd = ioread32(ring->virtual_start + acthd);
2535 if (cmd == ipehr)
2536 break;
2537
2538 acthd -= 4;
2539 if (acthd < acthd_min)
Chris Wilson6274f212013-06-10 11:20:21 +01002540 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002541 } while (1);
2542
Chris Wilson6274f212013-06-10 11:20:21 +01002543 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
2544 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
Chris Wilsona24a11e2013-03-14 17:52:05 +02002545}
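/*
 * Added commentary on semaphore_waits_for() above (illustrative reading,
 * not part of the original file): ipehr holds the last MI_SEMAPHORE_MBOX
 * instruction issued by this ring; after checking it really is a
 * COMPARE|REGISTER wait, we walk backwards from ACTHD (at most three
 * dwords) to find that instruction in the ring buffer, read the seqno
 * being waited for from the dword that follows it, and decode which ring
 * is expected to signal it from the mailbox select bits of the command.
 */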
2546
Chris Wilson6274f212013-06-10 11:20:21 +01002547static int semaphore_passed(struct intel_ring_buffer *ring)
2548{
2549 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2550 struct intel_ring_buffer *signaller;
2551 u32 seqno, ctl;
2552
2553 ring->hangcheck.deadlock = true;
2554
2555 signaller = semaphore_waits_for(ring, &seqno);
2556 if (signaller == NULL || signaller->hangcheck.deadlock)
2557 return -1;
2558
2559 /* cursory check for an unkickable deadlock */
2560 ctl = I915_READ_CTL(signaller);
2561 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2562 return -1;
2563
2564 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2565}
2566
2567static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2568{
2569 struct intel_ring_buffer *ring;
2570 int i;
2571
2572 for_each_ring(ring, dev_priv, i)
2573 ring->hangcheck.deadlock = false;
2574}
2575
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002576static enum intel_ring_hangcheck_action
2577ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002578{
2579 struct drm_device *dev = ring->dev;
2580 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002581 u32 tmp;
2582
Chris Wilson6274f212013-06-10 11:20:21 +01002583 if (ring->hangcheck.acthd != acthd)
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002584 return HANGCHECK_ACTIVE;
Chris Wilson6274f212013-06-10 11:20:21 +01002585
Chris Wilson9107e9d2013-06-10 11:20:20 +01002586 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002587 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002588
2589 /* Is the chip hanging on a WAIT_FOR_EVENT?
2590 * If so we can simply poke the RB_WAIT bit
2591 * and break the hang. This should work on
2592 * all but the second generation chipsets.
2593 */
2594 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002595 if (tmp & RING_WAIT) {
Mika Kuoppala58174462014-02-25 17:11:26 +02002596 i915_handle_error(dev, false,
2597 "Kicking stuck wait on %s",
2598 ring->name);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002599 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002600 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002601 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002602
Chris Wilson6274f212013-06-10 11:20:21 +01002603 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2604 switch (semaphore_passed(ring)) {
2605 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002606 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002607 case 1:
Mika Kuoppala58174462014-02-25 17:11:26 +02002608 i915_handle_error(dev, false,
2609 "Kicking stuck semaphore on %s",
2610 ring->name);
Chris Wilson6274f212013-06-10 11:20:21 +01002611 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002612 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002613 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002614 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01002615 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002616 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002617
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002618 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002619}
2620
Ben Gamarif65d9422009-09-14 17:48:44 -04002621/**
2622 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002623 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2624 * if there is no progress, the hangcheck score for that ring is increased.
2625 * Further, acthd is inspected to see if the ring is stuck. If it is,
2626 * we kick the ring. If we see no progress on three subsequent calls,
2627 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04002628 */
Damien Lespiaua658b5d2013-08-08 22:28:56 +01002629static void i915_hangcheck_elapsed(unsigned long data)
Ben Gamarif65d9422009-09-14 17:48:44 -04002630{
2631 struct drm_device *dev = (struct drm_device *)data;
2632 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002633 struct intel_ring_buffer *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01002634 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002635 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002636 bool stuck[I915_NUM_RINGS] = { 0 };
2637#define BUSY 1
2638#define KICK 5
2639#define HUNG 20
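	/*
	 * Scoring sketch (added illustration; the exact hung threshold is
	 * HANGCHECK_SCORE_RING_HUNG): per hangcheck period a busy ring with
	 * no seqno progress gains BUSY (1), a stuck ring that we can kick
	 * gains KICK (5), and a fully hung ring gains HUNG (20), so a hard
	 * hang crosses the threshold within a few periods while occasional
	 * kicks take far longer to accumulate.
	 */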
Chris Wilson893eead2010-10-27 14:44:35 +01002640
Jani Nikulad330a952014-01-21 11:24:25 +02002641 if (!i915.enable_hangcheck)
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002642 return;
2643
Chris Wilsonb4519512012-05-11 14:29:30 +01002644 for_each_ring(ring, dev_priv, i) {
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002645 u32 seqno, acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002646 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002647
Chris Wilson6274f212013-06-10 11:20:21 +01002648 semaphore_clear_deadlocks(dev_priv);
2649
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002650 seqno = ring->get_seqno(ring, false);
2651 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002652
Chris Wilson9107e9d2013-06-10 11:20:20 +01002653 if (ring->hangcheck.seqno == seqno) {
2654 if (ring_idle(ring, seqno)) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002655 ring->hangcheck.action = HANGCHECK_IDLE;
2656
Chris Wilson9107e9d2013-06-10 11:20:20 +01002657 if (waitqueue_active(&ring->irq_queue)) {
2658 /* Issue a wake-up to catch stuck h/w. */
Chris Wilson094f9a52013-09-25 17:34:55 +01002659 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
Daniel Vetterf4adcd22013-10-28 09:24:13 +01002660 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2661 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2662 ring->name);
2663 else
2664 DRM_INFO("Fake missed irq on %s\n",
2665 ring->name);
Chris Wilson094f9a52013-09-25 17:34:55 +01002666 wake_up_all(&ring->irq_queue);
2667 }
2668 /* Safeguard against driver failure */
2669 ring->hangcheck.score += BUSY;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002670 } else
2671 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002672 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01002673 /* We always increment the hangcheck score
2674 * if the ring is busy and still processing
2675 * the same request, so that no single request
2676 * can run indefinitely (such as a chain of
2677 * batches). The only time we do not increment
2678 * the hangcheck score on this ring is when this
2679 * ring is in a legitimate wait for another
2680 * ring. In that case the waiting ring is a
2681 * victim and we want to be sure we catch the
2682 * right culprit. Then every time we do kick
2683 * the ring, add a small increment to the
2684 * score so that we can catch a batch that is
2685 * being repeatedly kicked and so responsible
2686 * for stalling the machine.
2687 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002688 ring->hangcheck.action = ring_stuck(ring,
2689 acthd);
2690
2691 switch (ring->hangcheck.action) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002692 case HANGCHECK_IDLE:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002693 case HANGCHECK_WAIT:
Chris Wilson6274f212013-06-10 11:20:21 +01002694 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002695 case HANGCHECK_ACTIVE:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002696 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01002697 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002698 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002699 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002700 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002701 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002702 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002703 stuck[i] = true;
2704 break;
2705 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002706 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002707 } else {
Mika Kuoppalada661462013-09-06 16:03:28 +03002708 ring->hangcheck.action = HANGCHECK_ACTIVE;
2709
Chris Wilson9107e9d2013-06-10 11:20:20 +01002710 /* Gradually reduce the count so that we catch DoS
2711 * attempts across multiple batches.
2712 */
2713 if (ring->hangcheck.score > 0)
2714 ring->hangcheck.score--;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002715 }
2716
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002717 ring->hangcheck.seqno = seqno;
2718 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002719 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002720 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002721
Mika Kuoppala92cab732013-05-24 17:16:07 +03002722 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002723 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
Daniel Vetterb8d88d12013-08-28 10:57:59 +02002724 DRM_INFO("%s on %s\n",
2725 stuck[i] ? "stuck" : "no progress",
2726 ring->name);
Chris Wilsona43adf02013-06-10 11:20:22 +01002727 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002728 }
2729 }
2730
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002731 if (rings_hung)
Mika Kuoppala58174462014-02-25 17:11:26 +02002732 return i915_handle_error(dev, true, "Ring hung");
Ben Gamarif65d9422009-09-14 17:48:44 -04002733
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002734 if (busy_count)
2735 /* Reset timer in case the chip hangs without another request
2736 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002737 i915_queue_hangcheck(dev);
2738}
2739
2740void i915_queue_hangcheck(struct drm_device *dev)
2741{
2742 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulad330a952014-01-21 11:24:25 +02002743 if (!i915.enable_hangcheck)
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002744 return;
2745
2746 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2747 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002748}
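/*
 * Usage sketch for i915_queue_hangcheck() (hypothetical caller, added for
 * illustration only; submit_new_request() is not a real helper):
 *
 *	ret = submit_new_request(ring);
 *	if (ret == 0)
 *		i915_queue_hangcheck(ring->dev);
 *
 * i.e. the timer is (re-)armed whenever new work is submitted, and
 * i915_hangcheck_elapsed() re-arms it itself while any ring stays busy.
 */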
2749
Paulo Zanoni91738a92013-06-05 14:21:51 -03002750static void ibx_irq_preinstall(struct drm_device *dev)
2751{
2752 struct drm_i915_private *dev_priv = dev->dev_private;
2753
2754 if (HAS_PCH_NOP(dev))
2755 return;
2756
2757 /* south display irq */
2758 I915_WRITE(SDEIMR, 0xffffffff);
2759 /*
2760 * SDEIER is also touched by the interrupt handler to work around missed
2761 * PCH interrupts. Hence we can't update it after the interrupt handler
2762 * is enabled - instead we unconditionally enable all PCH interrupt
2763 * sources here, but then only unmask them as needed with SDEIMR.
2764 */
2765 I915_WRITE(SDEIER, 0xffffffff);
2766 POSTING_READ(SDEIER);
2767}
2768
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002769static void gen5_gt_irq_preinstall(struct drm_device *dev)
2770{
2771 struct drm_i915_private *dev_priv = dev->dev_private;
2772
2773 /* and GT */
2774 I915_WRITE(GTIMR, 0xffffffff);
2775 I915_WRITE(GTIER, 0x0);
2776 POSTING_READ(GTIER);
2777
2778 if (INTEL_INFO(dev)->gen >= 6) {
2779 /* and PM */
2780 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2781 I915_WRITE(GEN6_PMIER, 0x0);
2782 POSTING_READ(GEN6_PMIER);
2783 }
2784}
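/*
 * Added note (sketch of the common pattern above): each preinstall helper
 * masks everything in the unit's IMR, zeroes its IER and finishes with a
 * posting read, so the hardware is quiesced before the interrupt handler
 * is installed.
 */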
2785
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786/* drm_dma.h hooks
2787*/
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002788static void ironlake_irq_preinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002789{
2790 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2791
2792 I915_WRITE(HWSTAM, 0xeffe);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002793
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002794 I915_WRITE(DEIMR, 0xffffffff);
2795 I915_WRITE(DEIER, 0x0);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002796 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002797
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002798 gen5_gt_irq_preinstall(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002799
Paulo Zanoni91738a92013-06-05 14:21:51 -03002800 ibx_irq_preinstall(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002801}
2802
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002803static void valleyview_irq_preinstall(struct drm_device *dev)
2804{
2805 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2806 int pipe;
2807
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002808 /* VLV magic */
2809 I915_WRITE(VLV_IMR, 0);
2810 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2811 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2812 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2813
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002814 /* and GT */
2815 I915_WRITE(GTIIR, I915_READ(GTIIR));
2816 I915_WRITE(GTIIR, I915_READ(GTIIR));
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002817
2818 gen5_gt_irq_preinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002819
2820 I915_WRITE(DPINVGTT, 0xff);
2821
2822 I915_WRITE(PORT_HOTPLUG_EN, 0);
2823 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2824 for_each_pipe(pipe)
2825 I915_WRITE(PIPESTAT(pipe), 0xffff);
2826 I915_WRITE(VLV_IIR, 0xffffffff);
2827 I915_WRITE(VLV_IMR, 0xffffffff);
2828 I915_WRITE(VLV_IER, 0x0);
2829 POSTING_READ(VLV_IER);
2830}
2831
Ben Widawskyabd58f02013-11-02 21:07:09 -07002832static void gen8_irq_preinstall(struct drm_device *dev)
2833{
2834 struct drm_i915_private *dev_priv = dev->dev_private;
2835 int pipe;
2836
Ben Widawskyabd58f02013-11-02 21:07:09 -07002837 I915_WRITE(GEN8_MASTER_IRQ, 0);
2838 POSTING_READ(GEN8_MASTER_IRQ);
2839
2840 /* IIR can theoretically queue up two events. Be paranoid */
2841#define GEN8_IRQ_INIT_NDX(type, which) do { \
2842 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2843 POSTING_READ(GEN8_##type##_IMR(which)); \
2844 I915_WRITE(GEN8_##type##_IER(which), 0); \
2845 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2846 POSTING_READ(GEN8_##type##_IIR(which)); \
2847 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2848 } while (0)
2849
2850#define GEN8_IRQ_INIT(type) do { \
2851 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2852 POSTING_READ(GEN8_##type##_IMR); \
2853 I915_WRITE(GEN8_##type##_IER, 0); \
2854 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2855 POSTING_READ(GEN8_##type##_IIR); \
2856 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2857 } while (0)
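/*
 * Added commentary: IIR is written twice in the helpers above because, as
 * the comment notes, it can latch a second event behind the first; clearing
 * it, doing a posting read and clearing it again ensures no stale event
 * survives into postinstall.
 */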
2858
2859 GEN8_IRQ_INIT_NDX(GT, 0);
2860 GEN8_IRQ_INIT_NDX(GT, 1);
2861 GEN8_IRQ_INIT_NDX(GT, 2);
2862 GEN8_IRQ_INIT_NDX(GT, 3);
2863
2864 for_each_pipe(pipe) {
2865 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
2866 }
2867
2868 GEN8_IRQ_INIT(DE_PORT);
2869 GEN8_IRQ_INIT(DE_MISC);
2870 GEN8_IRQ_INIT(PCU);
2871#undef GEN8_IRQ_INIT
2872#undef GEN8_IRQ_INIT_NDX
2873
2874 POSTING_READ(GEN8_PCU_IIR);
Jesse Barnes09f23442014-01-10 13:13:09 -08002875
2876 ibx_irq_preinstall(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002877}
2878
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002879static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002880{
2881 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002882 struct drm_mode_config *mode_config = &dev->mode_config;
2883 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02002884 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07002885
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002886 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002887 hotplug_irqs = SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002888 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002889 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002890 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002891 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002892 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002893 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002894 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002895 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002896 }
2897
Daniel Vetterfee884e2013-07-04 23:35:21 +02002898 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002899
2900 /*
2901 * Enable digital hotplug on the PCH, and configure the DP short pulse
2902 * duration to 2ms (which is the minimum in the Display Port spec)
2903 *
2904 * This register is the same on all known PCH chips.
2905 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002906 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2907 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2908 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2909 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2910 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2911 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2912}
2913
Paulo Zanonid46da432013-02-08 17:35:15 -02002914static void ibx_irq_postinstall(struct drm_device *dev)
2915{
2916 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002917 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02002918
Daniel Vetter692a04c2013-05-29 21:43:05 +02002919 if (HAS_PCH_NOP(dev))
2920 return;
2921
Paulo Zanoni86642812013-04-12 17:57:57 -03002922 if (HAS_PCH_IBX(dev)) {
2923 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
Paulo Zanonide032bf2013-04-12 17:57:58 -03002924 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
Paulo Zanoni86642812013-04-12 17:57:57 -03002925 } else {
2926 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2927
2928 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2929 }
Ben Widawskyab5c6082013-04-05 13:12:41 -07002930
Paulo Zanonid46da432013-02-08 17:35:15 -02002931 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2932 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02002933}
2934
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002935static void gen5_gt_irq_postinstall(struct drm_device *dev)
2936{
2937 struct drm_i915_private *dev_priv = dev->dev_private;
2938 u32 pm_irqs, gt_irqs;
2939
2940 pm_irqs = gt_irqs = 0;
2941
2942 dev_priv->gt_irq_mask = ~0;
Ben Widawsky040d2ba2013-09-19 11:01:40 -07002943 if (HAS_L3_DPF(dev)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002944 /* L3 parity interrupt is always unmasked. */
Ben Widawsky35a85ac2013-09-19 11:13:41 -07002945 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2946 gt_irqs |= GT_PARITY_ERROR(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002947 }
2948
2949 gt_irqs |= GT_RENDER_USER_INTERRUPT;
2950 if (IS_GEN5(dev)) {
2951 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2952 ILK_BSD_USER_INTERRUPT;
2953 } else {
2954 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2955 }
2956
2957 I915_WRITE(GTIIR, I915_READ(GTIIR));
2958 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2959 I915_WRITE(GTIER, gt_irqs);
2960 POSTING_READ(GTIER);
2961
2962 if (INTEL_INFO(dev)->gen >= 6) {
2963 pm_irqs |= GEN6_PM_RPS_EVENTS;
2964
2965 if (HAS_VEBOX(dev))
2966 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2967
Paulo Zanoni605cd252013-08-06 18:57:15 -03002968 dev_priv->pm_irq_mask = 0xffffffff;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002969 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
Paulo Zanoni605cd252013-08-06 18:57:15 -03002970 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002971 I915_WRITE(GEN6_PMIER, pm_irqs);
2972 POSTING_READ(GEN6_PMIER);
2973 }
2974}
2975
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002976static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002977{
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002978 unsigned long irqflags;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002979 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002980 u32 display_mask, extra_mask;
2981
2982 if (INTEL_INFO(dev)->gen >= 7) {
2983 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2984 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2985 DE_PLANEB_FLIP_DONE_IVB |
2986 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2987 DE_ERR_INT_IVB);
2988 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2989 DE_PIPEA_VBLANK_IVB);
2990
2991 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2992 } else {
2993 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2994 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02002995 DE_AUX_CHANNEL_A |
2996 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2997 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2998 DE_POISON);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002999 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
3000 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003001
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003002 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003003
3004 /* should always be able to generate an irq */
3005 I915_WRITE(DEIIR, I915_READ(DEIIR));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003006 I915_WRITE(DEIMR, dev_priv->irq_mask);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03003007 I915_WRITE(DEIER, display_mask | extra_mask);
Chris Wilson3143a2b2010-11-16 15:55:10 +00003008 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003009
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003010 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003011
Paulo Zanonid46da432013-02-08 17:35:15 -02003012 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07003013
Jesse Barnesf97108d2010-01-29 11:27:07 -08003014 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02003015 /* Enable PCU event interrupts
3016 *
3017 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003018 * setup is guaranteed to run in single-threaded context. But we
3019 * need it to make the assert_spin_locked happy. */
3020 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003021 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02003022 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08003023 }
3024
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003025 return 0;
3026}
3027
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003028static int valleyview_irq_postinstall(struct drm_device *dev)
3029{
3030 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003031 u32 enable_mask;
Imre Deak755e9012014-02-10 18:42:47 +02003032 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3033 PIPE_CRC_DONE_INTERRUPT_STATUS;
Daniel Vetterb79480b2013-06-27 17:52:10 +02003034 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003035
3036 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07003037 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3038 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
3039 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003040 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
3041
Jesse Barnes31acc7f2012-06-20 10:53:11 -07003042 /*
3043 * Leave vblank interrupts masked initially. enable/disable will
3044 * toggle them based on usage.
3045 */
3046 dev_priv->irq_mask = (~enable_mask) |
3047 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
3048 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003049
Daniel Vetter20afbda2012-12-11 14:05:07 +01003050 I915_WRITE(PORT_HOTPLUG_EN, 0);
3051 POSTING_READ(PORT_HOTPLUG_EN);
3052
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003053 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3054 I915_WRITE(VLV_IER, enable_mask);
3055 I915_WRITE(VLV_IIR, 0xffffffff);
3056 I915_WRITE(PIPESTAT(0), 0xffff);
3057 I915_WRITE(PIPESTAT(1), 0xffff);
3058 POSTING_READ(VLV_IER);
3059
Daniel Vetterb79480b2013-06-27 17:52:10 +02003060 /* Interrupt setup is already guaranteed to be single-threaded, this is
3061 * just to make the assert_spin_locked check happy. */
3062 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02003063 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
Imre Deak755e9012014-02-10 18:42:47 +02003064 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02003065 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
Daniel Vetterb79480b2013-06-27 17:52:10 +02003066 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07003067
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003068 I915_WRITE(VLV_IIR, 0xffffffff);
3069 I915_WRITE(VLV_IIR, 0xffffffff);
3070
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003071 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003072
3073 /* ack & enable invalid PTE error interrupts */
3074#if 0 /* FIXME: add support to irq handler for checking these bits */
3075 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3076 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3077#endif
3078
3079 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003080
3081 return 0;
3082}
3083
Ben Widawskyabd58f02013-11-02 21:07:09 -07003084static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3085{
3086 int i;
3087
3088 /* These are interrupts we'll toggle with the ring mask register */
3089 uint32_t gt_interrupts[] = {
3090 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3091 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3092 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3093 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3094 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3095 0,
3096 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3097 };
3098
3099 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
3100 u32 tmp = I915_READ(GEN8_GT_IIR(i));
3101 if (tmp)
3102 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
3103 i, tmp);
3104 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
3105 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
3106 }
3107 POSTING_READ(GEN8_GT_IER(0));
3108}
3109
3110static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3111{
3112 struct drm_device *dev = dev_priv->dev;
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003113 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
3114 GEN8_PIPE_CDCLK_CRC_DONE |
3115 GEN8_PIPE_FIFO_UNDERRUN |
3116 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3117 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003118 int pipe;
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01003119 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3120 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3121 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003122
3123 for_each_pipe(pipe) {
3124 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
3125 if (tmp)
3126 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
3127 pipe, tmp);
3128 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3129 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
3130 }
3131 POSTING_READ(GEN8_DE_PIPE_ISR(0));
3132
Daniel Vetter6d766f02013-11-07 14:49:55 +01003133 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
3134 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003135 POSTING_READ(GEN8_DE_PORT_IER);
3136}
3137
3138static int gen8_irq_postinstall(struct drm_device *dev)
3139{
3140 struct drm_i915_private *dev_priv = dev->dev_private;
3141
3142 gen8_gt_irq_postinstall(dev_priv);
3143 gen8_de_irq_postinstall(dev_priv);
3144
3145 ibx_irq_postinstall(dev);
3146
3147 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3148 POSTING_READ(GEN8_MASTER_IRQ);
3149
3150 return 0;
3151}
3152
3153static void gen8_irq_uninstall(struct drm_device *dev)
3154{
3155 struct drm_i915_private *dev_priv = dev->dev_private;
3156 int pipe;
3157
3158 if (!dev_priv)
3159 return;
3160
Ben Widawskyabd58f02013-11-02 21:07:09 -07003161 I915_WRITE(GEN8_MASTER_IRQ, 0);
3162
3163#define GEN8_IRQ_FINI_NDX(type, which) do { \
3164 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3165 I915_WRITE(GEN8_##type##_IER(which), 0); \
3166 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3167 } while (0)
3168
3169#define GEN8_IRQ_FINI(type) do { \
3170 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3171 I915_WRITE(GEN8_##type##_IER, 0); \
3172 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3173 } while (0)
3174
3175 GEN8_IRQ_FINI_NDX(GT, 0);
3176 GEN8_IRQ_FINI_NDX(GT, 1);
3177 GEN8_IRQ_FINI_NDX(GT, 2);
3178 GEN8_IRQ_FINI_NDX(GT, 3);
3179
3180 for_each_pipe(pipe) {
3181 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
3182 }
3183
3184 GEN8_IRQ_FINI(DE_PORT);
3185 GEN8_IRQ_FINI(DE_MISC);
3186 GEN8_IRQ_FINI(PCU);
3187#undef GEN8_IRQ_FINI
3188#undef GEN8_IRQ_FINI_NDX
3189
3190 POSTING_READ(GEN8_PCU_IIR);
3191}
3192
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003193static void valleyview_irq_uninstall(struct drm_device *dev)
3194{
3195 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3196 int pipe;
3197
3198 if (!dev_priv)
3199 return;
3200
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003201 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003202
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003203 for_each_pipe(pipe)
3204 I915_WRITE(PIPESTAT(pipe), 0xffff);
3205
3206 I915_WRITE(HWSTAM, 0xffffffff);
3207 I915_WRITE(PORT_HOTPLUG_EN, 0);
3208 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3209 for_each_pipe(pipe)
3210 I915_WRITE(PIPESTAT(pipe), 0xffff);
3211 I915_WRITE(VLV_IIR, 0xffffffff);
3212 I915_WRITE(VLV_IMR, 0xffffffff);
3213 I915_WRITE(VLV_IER, 0x0);
3214 POSTING_READ(VLV_IER);
3215}
3216
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003217static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003218{
3219 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07003220
3221 if (!dev_priv)
3222 return;
3223
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003224 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003225
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003226 I915_WRITE(HWSTAM, 0xffffffff);
3227
3228 I915_WRITE(DEIMR, 0xffffffff);
3229 I915_WRITE(DEIER, 0x0);
3230 I915_WRITE(DEIIR, I915_READ(DEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03003231 if (IS_GEN7(dev))
3232 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003233
3234 I915_WRITE(GTIMR, 0xffffffff);
3235 I915_WRITE(GTIER, 0x0);
3236 I915_WRITE(GTIIR, I915_READ(GTIIR));
Keith Packard192aac1f2011-09-20 10:12:44 -07003237
Ben Widawskyab5c6082013-04-05 13:12:41 -07003238 if (HAS_PCH_NOP(dev))
3239 return;
3240
Keith Packard192aac1f2011-09-20 10:12:44 -07003241 I915_WRITE(SDEIMR, 0xffffffff);
3242 I915_WRITE(SDEIER, 0x0);
3243 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03003244 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3245 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003246}
3247
Chris Wilsonc2798b12012-04-22 21:13:57 +01003248static void i8xx_irq_preinstall(struct drm_device * dev)
3249{
3250 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3251 int pipe;
3252
Chris Wilsonc2798b12012-04-22 21:13:57 +01003253 for_each_pipe(pipe)
3254 I915_WRITE(PIPESTAT(pipe), 0);
3255 I915_WRITE16(IMR, 0xffff);
3256 I915_WRITE16(IER, 0x0);
3257 POSTING_READ16(IER);
3258}
3259
3260static int i8xx_irq_postinstall(struct drm_device *dev)
3261{
3262 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter379ef822013-10-16 22:55:56 +02003263 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003264
Chris Wilsonc2798b12012-04-22 21:13:57 +01003265 I915_WRITE16(EMR,
3266 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3267
3268 /* Unmask the interrupts that we always want on. */
3269 dev_priv->irq_mask =
3270 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3271 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3272 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3273 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3274 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3275 I915_WRITE16(IMR, dev_priv->irq_mask);
3276
3277 I915_WRITE16(IER,
3278 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3279 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3280 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3281 I915_USER_INTERRUPT);
3282 POSTING_READ16(IER);
3283
Daniel Vetter379ef822013-10-16 22:55:56 +02003284 /* Interrupt setup is already guaranteed to be single-threaded, this is
3285 * just to make the assert_spin_locked check happy. */
3286 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Imre Deak755e9012014-02-10 18:42:47 +02003287 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3288 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetter379ef822013-10-16 22:55:56 +02003289 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3290
Chris Wilsonc2798b12012-04-22 21:13:57 +01003291 return 0;
3292}
3293
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003294/*
3295 * Returns true when a page flip has completed.
3296 */
3297static bool i8xx_handle_vblank(struct drm_device *dev,
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003298 int plane, int pipe, u32 iir)
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003299{
3300 drm_i915_private_t *dev_priv = dev->dev_private;
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003301 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003302
3303 if (!drm_handle_vblank(dev, pipe))
3304 return false;
3305
3306 if ((iir & flip_pending) == 0)
3307 return false;
3308
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003309 intel_prepare_page_flip(dev, plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003310
3311 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3312 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3313 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3314 * the flip is completed (no longer pending). Since this doesn't raise
3315 * an interrupt per se, we watch for the change at vblank.
3316 */
3317 if (I915_READ16(ISR) & flip_pending)
3318 return false;
3319
3320 intel_finish_page_flip(dev, pipe);
3321
3322 return true;
3323}
3324
Daniel Vetterff1f5252012-10-02 15:10:55 +02003325static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003326{
3327 struct drm_device *dev = (struct drm_device *) arg;
3328 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003329 u16 iir, new_iir;
3330 u32 pipe_stats[2];
3331 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003332 int pipe;
3333 u16 flip_mask =
3334 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3335 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3336
Chris Wilsonc2798b12012-04-22 21:13:57 +01003337 iir = I915_READ16(IIR);
3338 if (iir == 0)
3339 return IRQ_NONE;
3340
3341 while (iir & ~flip_mask) {
3342 /* Can't rely on pipestat interrupt bit in iir as it might
3343 * have been cleared after the pipestat interrupt was received.
3344 * It doesn't set the bit in iir again, but it still produces
3345 * interrupts (for non-MSI).
3346 */
3347 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3348 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02003349 i915_handle_error(dev, false,
3350 "Command parser error, iir 0x%08x",
3351 iir);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003352
3353 for_each_pipe(pipe) {
3354 int reg = PIPESTAT(pipe);
3355 pipe_stats[pipe] = I915_READ(reg);
3356
3357 /*
3358 * Clear the PIPE*STAT regs before the IIR
3359 */
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003360 if (pipe_stats[pipe] & 0x8000ffff)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003361 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003362 }
3363 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3364
3365 I915_WRITE16(IIR, iir & ~flip_mask);
3366 new_iir = I915_READ16(IIR); /* Flush posted writes */
3367
Daniel Vetterd05c6172012-04-26 23:28:09 +02003368 i915_update_dri1_breadcrumb(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003369
3370 if (iir & I915_USER_INTERRUPT)
3371 notify_ring(dev, &dev_priv->ring[RCS]);
3372
Daniel Vetter4356d582013-10-16 22:55:55 +02003373 for_each_pipe(pipe) {
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003374 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003375 if (HAS_FBC(dev))
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003376 plane = !plane;
3377
Daniel Vetter4356d582013-10-16 22:55:55 +02003378 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003379 i8xx_handle_vblank(dev, plane, pipe, iir))
3380 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003381
Daniel Vetter4356d582013-10-16 22:55:55 +02003382 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003383 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003384
3385 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3386 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02003387 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
Daniel Vetter4356d582013-10-16 22:55:55 +02003388 }
Chris Wilsonc2798b12012-04-22 21:13:57 +01003389
3390 iir = new_iir;
3391 }
3392
3393 return IRQ_HANDLED;
3394}
3395
3396static void i8xx_irq_uninstall(struct drm_device * dev)
3397{
3398 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3399 int pipe;
3400
Chris Wilsonc2798b12012-04-22 21:13:57 +01003401 for_each_pipe(pipe) {
3402 /* Clear enable bits; then clear status bits */
3403 I915_WRITE(PIPESTAT(pipe), 0);
3404 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3405 }
3406 I915_WRITE16(IMR, 0xffff);
3407 I915_WRITE16(IER, 0x0);
3408 I915_WRITE16(IIR, I915_READ16(IIR));
3409}
3410
Chris Wilsona266c7d2012-04-24 22:59:44 +01003411static void i915_irq_preinstall(struct drm_device * dev)
3412{
3413 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3414 int pipe;
3415
Chris Wilsona266c7d2012-04-24 22:59:44 +01003416 if (I915_HAS_HOTPLUG(dev)) {
3417 I915_WRITE(PORT_HOTPLUG_EN, 0);
3418 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3419 }
3420
Chris Wilson00d98eb2012-04-24 22:59:48 +01003421 I915_WRITE16(HWSTAM, 0xeffe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003422 for_each_pipe(pipe)
3423 I915_WRITE(PIPESTAT(pipe), 0);
3424 I915_WRITE(IMR, 0xffffffff);
3425 I915_WRITE(IER, 0x0);
3426 POSTING_READ(IER);
3427}
3428
3429static int i915_irq_postinstall(struct drm_device *dev)
3430{
3431 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01003432 u32 enable_mask;
Daniel Vetter379ef822013-10-16 22:55:56 +02003433 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003434
Chris Wilson38bde182012-04-24 22:59:50 +01003435 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3436
3437 /* Unmask the interrupts that we always want on. */
3438 dev_priv->irq_mask =
3439 ~(I915_ASLE_INTERRUPT |
3440 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3441 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3442 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3443 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3444 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3445
3446 enable_mask =
3447 I915_ASLE_INTERRUPT |
3448 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3449 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3450 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3451 I915_USER_INTERRUPT;
3452
Chris Wilsona266c7d2012-04-24 22:59:44 +01003453 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01003454 I915_WRITE(PORT_HOTPLUG_EN, 0);
3455 POSTING_READ(PORT_HOTPLUG_EN);
3456
Chris Wilsona266c7d2012-04-24 22:59:44 +01003457 /* Enable in IER... */
3458 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3459 /* and unmask in IMR */
3460 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3461 }
3462
Chris Wilsona266c7d2012-04-24 22:59:44 +01003463 I915_WRITE(IMR, dev_priv->irq_mask);
3464 I915_WRITE(IER, enable_mask);
3465 POSTING_READ(IER);
3466
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003467 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003468
Daniel Vetter379ef822013-10-16 22:55:56 +02003469 /* Interrupt setup is already guaranteed to be single-threaded, this is
3470 * just to make the assert_spin_locked check happy. */
3471 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Imre Deak755e9012014-02-10 18:42:47 +02003472 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3473 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetter379ef822013-10-16 22:55:56 +02003474 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3475
Daniel Vetter20afbda2012-12-11 14:05:07 +01003476 return 0;
3477}
3478
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003479/*
3480 * Returns true when a page flip has completed.
3481 */
3482static bool i915_handle_vblank(struct drm_device *dev,
3483 int plane, int pipe, u32 iir)
3484{
3485 drm_i915_private_t *dev_priv = dev->dev_private;
3486 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3487
3488 if (!drm_handle_vblank(dev, pipe))
3489 return false;
3490
3491 if ((iir & flip_pending) == 0)
3492 return false;
3493
3494 intel_prepare_page_flip(dev, plane);
3495
3496 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3497 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3498 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3499 * the flip is completed (no longer pending). Since this doesn't raise
3500 * an interrupt per se, we watch for the change at vblank.
3501 */
3502 if (I915_READ(ISR) & flip_pending)
3503 return false;
3504
3505 intel_finish_page_flip(dev, pipe);
3506
3507 return true;
3508}
3509
Daniel Vetterff1f5252012-10-02 15:10:55 +02003510static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003511{
3512 struct drm_device *dev = (struct drm_device *) arg;
3513 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01003514 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003515 unsigned long irqflags;
Chris Wilson38bde182012-04-24 22:59:50 +01003516 u32 flip_mask =
3517 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3518 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01003519 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003520
Chris Wilsona266c7d2012-04-24 22:59:44 +01003521 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01003522 do {
3523 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01003524 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003525
3526 /* Can't rely on pipestat interrupt bit in iir as it might
3527 * have been cleared after the pipestat interrupt was received.
3528 * It doesn't set the bit in iir again, but it still produces
3529 * interrupts (for non-MSI).
3530 */
3531 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3532 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
Mika Kuoppala58174462014-02-25 17:11:26 +02003533 i915_handle_error(dev, false,
3534 "Command parser error, iir 0x%08x",
3535 iir);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003536
3537 for_each_pipe(pipe) {
3538 int reg = PIPESTAT(pipe);
3539 pipe_stats[pipe] = I915_READ(reg);
3540
Chris Wilson38bde182012-04-24 22:59:50 +01003541 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003542 if (pipe_stats[pipe] & 0x8000ffff) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003543 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01003544 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003545 }
3546 }
3547 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3548
3549 if (!irq_received)
3550 break;
3551
Chris Wilsona266c7d2012-04-24 22:59:44 +01003552 /* Consume port. Then clear IIR or we'll miss events */
3553 if ((I915_HAS_HOTPLUG(dev)) &&
3554 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3555 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02003556 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003557
Daniel Vetter91d131d2013-06-27 17:52:14 +02003558 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3559
Chris Wilsona266c7d2012-04-24 22:59:44 +01003560 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
Chris Wilson38bde182012-04-24 22:59:50 +01003561 POSTING_READ(PORT_HOTPLUG_STAT);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003562 }
3563
Chris Wilson38bde182012-04-24 22:59:50 +01003564 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003565 new_iir = I915_READ(IIR); /* Flush posted writes */
3566
Chris Wilsona266c7d2012-04-24 22:59:44 +01003567 if (iir & I915_USER_INTERRUPT)
3568 notify_ring(dev, &dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003569
Chris Wilsona266c7d2012-04-24 22:59:44 +01003570 for_each_pipe(pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01003571 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003572 if (HAS_FBC(dev))
Chris Wilson38bde182012-04-24 22:59:50 +01003573 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02003574
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003575 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3576 i915_handle_vblank(dev, plane, pipe, iir))
3577 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003578
3579 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3580 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003581
3582 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003583 i9xx_pipe_crc_irq_handler(dev, pipe);
Ville Syrjälä2d9d2b02014-01-17 11:44:31 +02003584
3585 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3586 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
Ville Syrjäläfc2c8072014-01-17 11:44:32 +02003587 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003588 }
3589
Chris Wilsona266c7d2012-04-24 22:59:44 +01003590 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3591 intel_opregion_asle_intr(dev);
3592
3593 /* With MSI, interrupts are only generated when iir
3594 * transitions from zero to nonzero. If another bit got
3595 * set while we were handling the existing iir bits, then
3596 * we would never get another interrupt.
3597 *
3598 * This is fine on non-MSI as well, as if we hit this path
3599 * we avoid exiting the interrupt handler only to generate
3600 * another one.
3601 *
3602 * Note that for MSI this could cause a stray interrupt report
3603 * if an interrupt landed in the time between writing IIR and
3604 * the posting read. This should be rare enough to never
3605 * trigger the 99% of 100,000 interrupts test for disabling
3606 * stray interrupts.
3607 */
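		/* Added illustration: if, say, a vblank bit is set between
		 * the IIR write above and the read of new_iir, the next pass
		 * of the loop below handles it instead of relying on a fresh
		 * MSI edge.
		 */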
Chris Wilson38bde182012-04-24 22:59:50 +01003608 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003609 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003610 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003611
Daniel Vetterd05c6172012-04-26 23:28:09 +02003612 i915_update_dri1_breadcrumb(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01003613
Chris Wilsona266c7d2012-04-24 22:59:44 +01003614 return ret;
3615}
3616
3617static void i915_irq_uninstall(struct drm_device * dev)
3618{
3619 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3620 int pipe;
3621
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003622 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003623
Chris Wilsona266c7d2012-04-24 22:59:44 +01003624 if (I915_HAS_HOTPLUG(dev)) {
3625 I915_WRITE(PORT_HOTPLUG_EN, 0);
3626 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3627 }
3628
Chris Wilson00d98eb2012-04-24 22:59:48 +01003629 I915_WRITE16(HWSTAM, 0xffff);
Chris Wilson55b39752012-04-24 22:59:49 +01003630 for_each_pipe(pipe) {
3631 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003632 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003633 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3634 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003635 I915_WRITE(IMR, 0xffffffff);
3636 I915_WRITE(IER, 0x0);
3637
Chris Wilsona266c7d2012-04-24 22:59:44 +01003638 I915_WRITE(IIR, I915_READ(IIR));
3639}
3640
3641static void i965_irq_preinstall(struct drm_device * dev)
3642{
3643 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3644 int pipe;
3645
Chris Wilsonadca4732012-05-11 18:01:31 +01003646 I915_WRITE(PORT_HOTPLUG_EN, 0);
3647 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003648
3649 I915_WRITE(HWSTAM, 0xeffe);
3650 for_each_pipe(pipe)
3651 I915_WRITE(PIPESTAT(pipe), 0);
3652 I915_WRITE(IMR, 0xffffffff);
3653 I915_WRITE(IER, 0x0);
3654 POSTING_READ(IER);
3655}
3656
3657static int i965_irq_postinstall(struct drm_device *dev)
3658{
3659 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003660 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003661 u32 error_mask;
Daniel Vetterb79480b2013-06-27 17:52:10 +02003662 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003663
Chris Wilsona266c7d2012-04-24 22:59:44 +01003664 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003665 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01003666 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003667 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3668 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3669 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3670 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3671 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3672
3673 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003674 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3675 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003676 enable_mask |= I915_USER_INTERRUPT;
3677
3678 if (IS_G4X(dev))
3679 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003680
Daniel Vetterb79480b2013-06-27 17:52:10 +02003681 /* Interrupt setup is already guaranteed to be single-threaded, this is
3682 * just to make the assert_spin_locked check happy. */
3683 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Imre Deak755e9012014-02-10 18:42:47 +02003684 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3685 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3686 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
Daniel Vetterb79480b2013-06-27 17:52:10 +02003687 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003688
Chris Wilsona266c7d2012-04-24 22:59:44 +01003689 /*
3690 * Enable some error detection, note the instruction error mask
3691 * bit is reserved, so we leave it masked.
3692 */
3693 if (IS_G4X(dev)) {
3694 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3695 GM45_ERROR_MEM_PRIV |
3696 GM45_ERROR_CP_PRIV |
3697 I915_ERROR_MEMORY_REFRESH);
3698 } else {
3699 error_mask = ~(I915_ERROR_PAGE_TABLE |
3700 I915_ERROR_MEMORY_REFRESH);
3701 }
3702 I915_WRITE(EMR, error_mask);
3703
3704 I915_WRITE(IMR, dev_priv->irq_mask);
3705 I915_WRITE(IER, enable_mask);
3706 POSTING_READ(IER);
3707
Daniel Vetter20afbda2012-12-11 14:05:07 +01003708 I915_WRITE(PORT_HOTPLUG_EN, 0);
3709 POSTING_READ(PORT_HOTPLUG_EN);
3710
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003711 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003712
3713 return 0;
3714}
3715
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

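/*
 * Top-half interrupt handler for gen4. IIR is re-read after each pass
 * and the loop only exits once no unserviced bits remain; PIPESTAT is
 * cleared before IIR (see the comments below) so that pipe events which
 * arrive while we are in here are not lost.
 */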
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

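/*
 * Tear-down mirrors the preinstall path: mask and disable everything,
 * then clear out any PIPESTAT and IIR bits that are still pending so a
 * later re-install starts from a clean slate.
 */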
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	intel_hpd_irq_uninstall(dev_priv);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

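/*
 * Timer callback, armed by the HPD storm detection code, that flips
 * storm-disabled pins back to HPD_ENABLED, moves the affected
 * connectors from polling back to hotplug interrupts and reprograms
 * the hardware via the platform ->hpd_irq_setup hook.
 */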
static void intel_hpd_irq_reenable(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

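/*
 * One-time driver init: set up the deferred work items and timers used
 * by the interrupt code, then pick the vblank counter (gen2 has no
 * usable hardware counter, g4x and newer expose a full 32 bit frame
 * counter) and the per-platform irq_handler/preinstall/postinstall/
 * uninstall and hpd_irq_setup hooks.
 */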
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

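/*
 * Reset hotplug state at init and resume: every pin starts out
 * HPD_ENABLED with a zeroed storm counter, connector->polled is
 * restored from the connector's default (falling back to
 * DRM_CONNECTOR_POLL_HPD when the encoder has an HPD pin), and the
 * platform hook then reprograms the hotplug hardware under irq_lock.
 */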
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

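/*
 * Haswell Package C8+ support: the two functions below save the
 * current DEIMR/SDEIMR/GTIMR/GTIER/GEN6_PMIMR state and mask every
 * interrupt source before PC8 entry, and restore that state again on
 * exit.
 */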
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

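/*
 * The WARNs below catch anyone who touched the interrupt mask
 * registers while they were supposed to stay fully masked for PC8,
 * i.e. modifications that bypassed the regsave state.
 */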
/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}