/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
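/*
 * Note: the tables above are indexed by enum hpd_pin and translate a hotplug
 * pin into the matching enable/status bit of the relevant hotplug register
 * (SDE_* bits for PCH-based platforms, *_HOTPLUG_INT_* bits for gmch-style
 * platforms), so the shared HPD code can stay platform-agnostic.
 */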

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
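/*
 * Note: DEIMR is a mask register, so a set bit *disables* the corresponding
 * interrupt; enabling therefore clears bits in the cached irq_mask. The
 * POSTING_READ makes sure the register write has actually reached the
 * hardware before the caller proceeds.
 */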

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
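/*
 * Note on the update helpers above: interrupt_mask selects which bits to
 * touch and enabled_irq_mask says which of those should end up enabled, so
 * (~enabled_irq_mask & interrupt_mask) yields exactly the bits that must be
 * set (i.e. masked off) in the IMR register. update(mask, mask) enables,
 * update(mask, 0) disables, and all other bits are left untouched.
 */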

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}
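/*
 * Note: when enabling, any stale underrun status is cleared first so an old
 * event can't fire immediately; when disabling, the status register is
 * sampled before the interrupt is masked, and an underrun that arrived while
 * reporting was still enabled gets logged instead of silently dropped.
 */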

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
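/*
 * Note: returning the previous state lets a caller that must temporarily
 * suppress underrun reporting (e.g., presumably, around a modeset where
 * transient underruns are expected) restore whatever setting was in effect
 * before, rather than unconditionally re-enabling it.
 */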

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
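/*
 * Note: PIPESTAT packs enable bits in the high half of the register and the
 * matching status bits 16 positions lower, which is why writing
 * mask | (mask >> 16) both enables the interrupt and clears any stale status
 * in one write. The initial read masks with 0x7fff0000 to keep only the
 * enable bits, so write-one-to-clear status bits aren't cleared by accident.
 */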

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
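/*
 * Note: the hardware frame counter increments at the start of the active
 * period, while callers expect the vblank count to increment at vblank
 * start. Adding (pixel >= vbl_start) bumps the count by one once the pixel
 * counter has passed vblank start, emulating the expected behaviour; the
 * final & 0xffffff keeps the result within the counter's 24-bit width
 * (16 high bits | 8 low bits).
 */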

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;

	if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}
	}

	return __raw_i915_read32(dev_priv, DEISR) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
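/*
 * Note: the return value is a bitmask of DRM_SCANOUTPOS_* flags rather than
 * an error code, and *vpos is reported relative to vblank end: it is
 * negative while inside vblank, counting up towards zero, and positive
 * through the active area. The bare 0x8 flag above marks that the one-line
 * scanline correction was applied.
 */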

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* If we just disabled HPD detection on some connectors, make sure
	 * polling is enabled, since it may have been turned off earlier when
	 * no outputs needed it. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
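/*
 * Note: the work function runs in two passes. The first pass, under
 * irq_lock, moves storm-afflicted connectors over to polling and consumes
 * the event bits; the second pass, under mode_config.mutex and with the
 * spinlock dropped, calls into ->detect(), which may sleep (e.g. for DDC
 * transactions) and so cannot run in atomic context.
 */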
918
static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
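/*
 * Note: the RPS step size (adj) doubles on every consecutive interrupt in
 * the same direction and resets when the direction changes, so the GPU
 * frequency converges quickly under sustained load changes while isolated
 * events only move it one step. A down-timeout event instead drops straight
 * to the efficient frequency (RPe) or, if already there, to the minimum.
 */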


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
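/*
 * Note: on GEN8 the per-engine interrupt bits are packed into shared GT IIR
 * banks, with each engine's bits at a fixed shift, and the master control
 * register says which banks have something pending. Each IIR bank is acked
 * by writing back the value that was read, once its events are handled.
 */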

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
			 hotplug_trigger);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN_ONCE(hpd[i] & hotplug_trigger &&
			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
			  hotplug_trigger, i, hpd[i]);

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

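/*
 * Waking gmbus_wait_queue here is intentional, not a copy/paste slip:
 * DP AUX transfer completion shares the GMBUS wait queue, and each
 * waiter re-checks its own completion condition after waking.
 */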
static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

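/*
 * Pipe CRC support for debugfs: each completed CRC result is stamped
 * with the vblank counter and pushed into a per-pipe circular buffer
 * under pipe_crc->lock; readers blocked in debugfs are then woken via
 * pipe_crc->wq.
 */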
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

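/*
 * Top-level interrupt handler for Valleyview: keep looping until
 * VLV_IIR, GTIIR and GEN6_PMIIR all read back zero, demuxing GT, pipe,
 * hotplug and PM events on each pass. The PIPE*STAT registers are
 * cleared before the IIR write so no status bits are lost.
 */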
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

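/*
 * South display engine (PCH) interrupt handlers: ibx_irq_handler
 * serves the IBX-generation PCHs, while cpt_irq_handler below serves
 * CPT/PPT (and, judging by the callers, the later pch-split parts
 * that take the cpt path). Both demux hotplug, AUX, GMBUS, audio and
 * FDI events out of SDEIIR.
 */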
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

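/*
 * North display engine interrupt demultiplexers: the ilk variant
 * handles the gen5/gen6 DEIIR bit layout, the ivb variant below the
 * gen7 layout. Both chain into the PCH handlers above for south
 * display events.
 */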
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

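/*
 * Main interrupt entry point for ILK through HSW. The master enable
 * bit in DEIER is dropped and SDEIER is zeroed for the duration of
 * the handler so that SDEIIR only ever needs to be written once per
 * invocation (see the comments below).
 */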
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

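/*
 * Main interrupt entry point for BDW. GEN8_MASTER_IRQ both reports
 * which IIR banks need servicing and, via GEN8_MASTER_IRQ_CONTROL,
 * acts as the global interrupt enable: it is cleared on entry and
 * restored on exit.
 */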
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp & GEN8_AUX_CHANNEL_A)
			dp_aux_irq_handler(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Port interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	for_each_pipe(pipe) {
		uint32_t pipe_iir;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}

		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		}

		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

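/*
 * Dump the error identity register (EIR) and the associated fault
 * registers to the log, then clear them; any bits that refuse to
 * clear are masked via EMR so they cannot keep storming.
 */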
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

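/*
 * Detect a page flip that completed in hardware without the flip-done
 * interrupt arriving: compare the live display base register against
 * the GGTT offset of the object queued for flipping and, on a match,
 * kick the flip along. Currently unreferenced, hence __always_unused.
 */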
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

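/*
 * Semaphore deadlock detection for hangcheck: a ring stalled on
 * MI_SEMAPHORE_MBOX is only considered hung if the ring it waits on is
 * making no progress either. semaphore_waits_for() scans back from
 * ACTHD for the semaphore command matching IPEHR to recover the target
 * seqno and the signalling ring.
 */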
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}

static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}

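/*
 * Classify a ring whose seqno has stopped advancing: if ACTHD is still
 * moving the batch is merely slow; otherwise try to kick
 * MI_WAIT_FOR_EVENT and semaphore stalls before declaring the ring
 * well and truly hung.
 */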
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return HANGCHECK_ACTIVE;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		i915_handle_error(dev, false);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			i915_handle_error(dev, false);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

Ben Gamarif65d9422009-09-14 17:48:44 -04002469/**
2470 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002471 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
 2472 * if there is no progress, the hangcheck score for that ring is increased.
 2473 * Further, acthd is inspected to see if the ring is stuck. If it is, we
 2474 * kick the ring. If we see no progress on three subsequent calls
 2475 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04002476 */
Damien Lespiaua658b5d2013-08-08 22:28:56 +01002477static void i915_hangcheck_elapsed(unsigned long data)
Ben Gamarif65d9422009-09-14 17:48:44 -04002478{
2479 struct drm_device *dev = (struct drm_device *)data;
2480 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002481 struct intel_ring_buffer *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01002482 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002483 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002484 bool stuck[I915_NUM_RINGS] = { 0 };
2485#define BUSY 1
2486#define KICK 5
2487#define HUNG 20
2488#define FIRE 30
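	/*
	 * Worked example of the scoring: a busy ring that is still chewing
	 * on the same request gains BUSY (1) per hangcheck pass while its
	 * ACTHD advances, and loses a point per pass once the seqno moves
	 * on. A hard hang gains HUNG (20) per pass, crossing FIRE (30) on
	 * the second pass (2 * 20 = 40 > 30); a batch that needs repeated
	 * kicking gains KICK (5) and crosses FIRE only on the seventh pass
	 * (7 * 5 = 35 > 30).
	 */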
Chris Wilson893eead2010-10-27 14:44:35 +01002489
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002490 if (!i915_enable_hangcheck)
2491 return;
2492
Chris Wilsonb4519512012-05-11 14:29:30 +01002493 for_each_ring(ring, dev_priv, i) {
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002494 u32 seqno, acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002495 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002496
Chris Wilson6274f212013-06-10 11:20:21 +01002497 semaphore_clear_deadlocks(dev_priv);
2498
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002499 seqno = ring->get_seqno(ring, false);
2500 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002501
Chris Wilson9107e9d2013-06-10 11:20:20 +01002502 if (ring->hangcheck.seqno == seqno) {
2503 if (ring_idle(ring, seqno)) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002504 ring->hangcheck.action = HANGCHECK_IDLE;
2505
Chris Wilson9107e9d2013-06-10 11:20:20 +01002506 if (waitqueue_active(&ring->irq_queue)) {
2507 /* Issue a wake-up to catch stuck h/w. */
Chris Wilson094f9a52013-09-25 17:34:55 +01002508 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
Daniel Vetterf4adcd22013-10-28 09:24:13 +01002509 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2510 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2511 ring->name);
2512 else
2513 DRM_INFO("Fake missed irq on %s\n",
2514 ring->name);
Chris Wilson094f9a52013-09-25 17:34:55 +01002515 wake_up_all(&ring->irq_queue);
2516 }
2517 /* Safeguard against driver failure */
2518 ring->hangcheck.score += BUSY;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002519 } else
2520 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002521 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01002522 /* We always increment the hangcheck score
2523 * if the ring is busy and still processing
2524 * the same request, so that no single request
2525 * can run indefinitely (such as a chain of
2526 * batches). The only time we do not increment
 2527 * the hangcheck score on this ring is if this
2528 * ring is in a legitimate wait for another
2529 * ring. In that case the waiting ring is a
2530 * victim and we want to be sure we catch the
2531 * right culprit. Then every time we do kick
2532 * the ring, add a small increment to the
2533 * score so that we can catch a batch that is
2534 * being repeatedly kicked and so responsible
2535 * for stalling the machine.
2536 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002537 ring->hangcheck.action = ring_stuck(ring,
2538 acthd);
2539
2540 switch (ring->hangcheck.action) {
Mika Kuoppalada661462013-09-06 16:03:28 +03002541 case HANGCHECK_IDLE:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002542 case HANGCHECK_WAIT:
Chris Wilson6274f212013-06-10 11:20:21 +01002543 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002544 case HANGCHECK_ACTIVE:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002545 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01002546 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002547 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002548 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002549 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002550 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002551 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002552 stuck[i] = true;
2553 break;
2554 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002555 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002556 } else {
Mika Kuoppalada661462013-09-06 16:03:28 +03002557 ring->hangcheck.action = HANGCHECK_ACTIVE;
2558
Chris Wilson9107e9d2013-06-10 11:20:20 +01002559 /* Gradually reduce the count so that we catch DoS
2560 * attempts across multiple batches.
2561 */
2562 if (ring->hangcheck.score > 0)
2563 ring->hangcheck.score--;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002564 }
2565
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002566 ring->hangcheck.seqno = seqno;
2567 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002568 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002569 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002570
Mika Kuoppala92cab732013-05-24 17:16:07 +03002571 for_each_ring(ring, dev_priv, i) {
Chris Wilson9107e9d2013-06-10 11:20:20 +01002572 if (ring->hangcheck.score > FIRE) {
Daniel Vetterb8d88d12013-08-28 10:57:59 +02002573 DRM_INFO("%s on %s\n",
2574 stuck[i] ? "stuck" : "no progress",
2575 ring->name);
Chris Wilsona43adf02013-06-10 11:20:22 +01002576 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002577 }
2578 }
2579
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002580 if (rings_hung)
2581 return i915_handle_error(dev, true);
Ben Gamarif65d9422009-09-14 17:48:44 -04002582
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002583 if (busy_count)
 2584 /* Reset the timer in case the chip hangs without another request
 2585 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002586 i915_queue_hangcheck(dev);
2587}
2588
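/*
 * (Re)arm the hangcheck timer one full period from now. Going by the
 * comment at the call site above, the intent is that this is invoked
 * whenever new work is queued, so that an idle GPU is not repeatedly
 * checked.
 */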
2589void i915_queue_hangcheck(struct drm_device *dev)
2590{
2591 struct drm_i915_private *dev_priv = dev->dev_private;
2592 if (!i915_enable_hangcheck)
2593 return;
2594
2595 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2596 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002597}
2598
Paulo Zanoni91738a92013-06-05 14:21:51 -03002599static void ibx_irq_preinstall(struct drm_device *dev)
2600{
2601 struct drm_i915_private *dev_priv = dev->dev_private;
2602
2603 if (HAS_PCH_NOP(dev))
2604 return;
2605
2606 /* south display irq */
2607 I915_WRITE(SDEIMR, 0xffffffff);
2608 /*
2609 * SDEIER is also touched by the interrupt handler to work around missed
2610 * PCH interrupts. Hence we can't update it after the interrupt handler
2611 * is enabled - instead we unconditionally enable all PCH interrupt
2612 * sources here, but then only unmask them as needed with SDEIMR.
2613 */
2614 I915_WRITE(SDEIER, 0xffffffff);
2615 POSTING_READ(SDEIER);
2616}
2617
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002618static void gen5_gt_irq_preinstall(struct drm_device *dev)
2619{
2620 struct drm_i915_private *dev_priv = dev->dev_private;
2621
2622 /* and GT */
2623 I915_WRITE(GTIMR, 0xffffffff);
2624 I915_WRITE(GTIER, 0x0);
2625 POSTING_READ(GTIER);
2626
2627 if (INTEL_INFO(dev)->gen >= 6) {
2628 /* and PM */
2629 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2630 I915_WRITE(GEN6_PMIER, 0x0);
2631 POSTING_READ(GEN6_PMIER);
2632 }
2633}
2634
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635/* drm_dma.h hooks
 2636 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002637static void ironlake_irq_preinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002638{
2639 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2640
Jesse Barnes46979952011-04-07 13:53:55 -07002641 atomic_set(&dev_priv->irq_received, 0);
2642
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002643 I915_WRITE(HWSTAM, 0xeffe);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002644
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002645 I915_WRITE(DEIMR, 0xffffffff);
2646 I915_WRITE(DEIER, 0x0);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002647 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002648
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002649 gen5_gt_irq_preinstall(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002650
Paulo Zanoni91738a92013-06-05 14:21:51 -03002651 ibx_irq_preinstall(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002652}
2653
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002654static void valleyview_irq_preinstall(struct drm_device *dev)
2655{
2656 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2657 int pipe;
2658
2659 atomic_set(&dev_priv->irq_received, 0);
2660
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002661 /* VLV magic */
2662 I915_WRITE(VLV_IMR, 0);
2663 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2664 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2665 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2666
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002667 /* and GT */
2668 I915_WRITE(GTIIR, I915_READ(GTIIR));
2669 I915_WRITE(GTIIR, I915_READ(GTIIR));
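	/* GTIIR is acked twice above; presumably, as with the gen8
	 * paranoia below, IIR can queue a second event behind the first
	 * and both need to be cleared. */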
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002670
2671 gen5_gt_irq_preinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002672
2673 I915_WRITE(DPINVGTT, 0xff);
2674
2675 I915_WRITE(PORT_HOTPLUG_EN, 0);
2676 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2677 for_each_pipe(pipe)
2678 I915_WRITE(PIPESTAT(pipe), 0xffff);
2679 I915_WRITE(VLV_IIR, 0xffffffff);
2680 I915_WRITE(VLV_IMR, 0xffffffff);
2681 I915_WRITE(VLV_IER, 0x0);
2682 POSTING_READ(VLV_IER);
2683}
2684
Ben Widawskyabd58f02013-11-02 21:07:09 -07002685static void gen8_irq_preinstall(struct drm_device *dev)
2686{
2687 struct drm_i915_private *dev_priv = dev->dev_private;
2688 int pipe;
2689
2690 atomic_set(&dev_priv->irq_received, 0);
2691
2692 I915_WRITE(GEN8_MASTER_IRQ, 0);
2693 POSTING_READ(GEN8_MASTER_IRQ);
2694
2695 /* IIR can theoretically queue up two events. Be paranoid */
2696#define GEN8_IRQ_INIT_NDX(type, which) do { \
2697 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2698 POSTING_READ(GEN8_##type##_IMR(which)); \
2699 I915_WRITE(GEN8_##type##_IER(which), 0); \
2700 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2701 POSTING_READ(GEN8_##type##_IIR(which)); \
2702 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2703 } while (0)
2704
2705#define GEN8_IRQ_INIT(type) do { \
2706 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2707 POSTING_READ(GEN8_##type##_IMR); \
2708 I915_WRITE(GEN8_##type##_IER, 0); \
2709 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2710 POSTING_READ(GEN8_##type##_IIR); \
2711 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2712 } while (0)
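/*
 * For reference, GEN8_IRQ_INIT_NDX(GT, 0) expands (modulo the
 * do { } while (0) wrapper) to:
 *
 *	I915_WRITE(GEN8_GT_IMR(0), 0xffffffff);
 *	POSTING_READ(GEN8_GT_IMR(0));
 *	I915_WRITE(GEN8_GT_IER(0), 0);
 *	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 *	POSTING_READ(GEN8_GT_IIR(0));
 *	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 *
 * i.e. mask every source, disable delivery, then ack IIR twice in case
 * a second event was already queued behind the one being cleared.
 */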
2713
2714 GEN8_IRQ_INIT_NDX(GT, 0);
2715 GEN8_IRQ_INIT_NDX(GT, 1);
2716 GEN8_IRQ_INIT_NDX(GT, 2);
2717 GEN8_IRQ_INIT_NDX(GT, 3);
2718
2719 for_each_pipe(pipe) {
2720 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
2721 }
2722
2723 GEN8_IRQ_INIT(DE_PORT);
2724 GEN8_IRQ_INIT(DE_MISC);
2725 GEN8_IRQ_INIT(PCU);
2726#undef GEN8_IRQ_INIT
2727#undef GEN8_IRQ_INIT_NDX
2728
2729 POSTING_READ(GEN8_PCU_IIR);
Jesse Barnes09f23442014-01-10 13:13:09 -08002730
2731 ibx_irq_preinstall(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002732}
2733
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002734static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002735{
2736 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002737 struct drm_mode_config *mode_config = &dev->mode_config;
2738 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02002739 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07002740
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002741 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002742 hotplug_irqs = SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002743 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002744 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002745 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002746 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002747 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002748 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002749 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002750 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002751 }
2752
Daniel Vetterfee884e2013-07-04 23:35:21 +02002753 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002754
2755 /*
2756 * Enable digital hotplug on the PCH, and configure the DP short pulse
2757 * duration to 2ms (which is the minimum in the Display Port spec)
2758 *
2759 * This register is the same on all known PCH chips.
2760 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002761 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2762 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2763 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2764 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2765 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2766 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2767}
2768
Paulo Zanonid46da432013-02-08 17:35:15 -02002769static void ibx_irq_postinstall(struct drm_device *dev)
2770{
2771 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002772 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02002773
Daniel Vetter692a04c2013-05-29 21:43:05 +02002774 if (HAS_PCH_NOP(dev))
2775 return;
2776
Paulo Zanoni86642812013-04-12 17:57:57 -03002777 if (HAS_PCH_IBX(dev)) {
2778 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
Paulo Zanonide032bf2013-04-12 17:57:58 -03002779 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
Paulo Zanoni86642812013-04-12 17:57:57 -03002780 } else {
2781 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2782
2783 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2784 }
Ben Widawskyab5c6082013-04-05 13:12:41 -07002785
Paulo Zanonid46da432013-02-08 17:35:15 -02002786 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2787 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02002788}
2789
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002790static void gen5_gt_irq_postinstall(struct drm_device *dev)
2791{
2792 struct drm_i915_private *dev_priv = dev->dev_private;
2793 u32 pm_irqs, gt_irqs;
2794
2795 pm_irqs = gt_irqs = 0;
2796
2797 dev_priv->gt_irq_mask = ~0;
Ben Widawsky040d2ba2013-09-19 11:01:40 -07002798 if (HAS_L3_DPF(dev)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002799 /* L3 parity interrupt is always unmasked. */
Ben Widawsky35a85ac2013-09-19 11:13:41 -07002800 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2801 gt_irqs |= GT_PARITY_ERROR(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002802 }
2803
2804 gt_irqs |= GT_RENDER_USER_INTERRUPT;
2805 if (IS_GEN5(dev)) {
2806 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2807 ILK_BSD_USER_INTERRUPT;
2808 } else {
2809 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2810 }
2811
2812 I915_WRITE(GTIIR, I915_READ(GTIIR));
2813 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2814 I915_WRITE(GTIER, gt_irqs);
2815 POSTING_READ(GTIER);
2816
2817 if (INTEL_INFO(dev)->gen >= 6) {
2818 pm_irqs |= GEN6_PM_RPS_EVENTS;
2819
2820 if (HAS_VEBOX(dev))
2821 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2822
Paulo Zanoni605cd252013-08-06 18:57:15 -03002823 dev_priv->pm_irq_mask = 0xffffffff;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002824 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
Paulo Zanoni605cd252013-08-06 18:57:15 -03002825 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002826 I915_WRITE(GEN6_PMIER, pm_irqs);
2827 POSTING_READ(GEN6_PMIER);
2828 }
2829}
2830
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002831static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002832{
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002833 unsigned long irqflags;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002834 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002835 u32 display_mask, extra_mask;
2836
2837 if (INTEL_INFO(dev)->gen >= 7) {
2838 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2839 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2840 DE_PLANEB_FLIP_DONE_IVB |
2841 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2842 DE_ERR_INT_IVB);
2843 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2844 DE_PIPEA_VBLANK_IVB);
2845
2846 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2847 } else {
2848 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2849 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Daniel Vetter5b3a8562013-10-16 22:55:48 +02002850 DE_AUX_CHANNEL_A |
2851 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2852 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2853 DE_POISON);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002854 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2855 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002856
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002857 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002858
 2859 /* should always be able to generate an irq */
2860 I915_WRITE(DEIIR, I915_READ(DEIIR));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002861 I915_WRITE(DEIMR, dev_priv->irq_mask);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002862 I915_WRITE(DEIER, display_mask | extra_mask);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002863 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002864
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002865 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002866
Paulo Zanonid46da432013-02-08 17:35:15 -02002867 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07002868
Jesse Barnesf97108d2010-01-29 11:27:07 -08002869 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02002870 /* Enable PCU event interrupts
2871 *
2872 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002873 * setup is guaranteed to run in single-threaded context. But we
 2874 * need it to make the assert_spin_locked check happy. */
2875 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002876 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002877 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002878 }
2879
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002880 return 0;
2881}
2882
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002883static int valleyview_irq_postinstall(struct drm_device *dev)
2884{
2885 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002886 u32 enable_mask;
Daniel Vetter379ef822013-10-16 22:55:56 +02002887 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2888 PIPE_CRC_DONE_ENABLE;
Daniel Vetterb79480b2013-06-27 17:52:10 +02002889 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002890
2891 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002892 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2893 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2894 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002895 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2896
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002897 /*
 2898 * Leave vblank interrupts masked initially; enable/disable will
2899 * toggle them based on usage.
2900 */
2901 dev_priv->irq_mask = (~enable_mask) |
2902 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2903 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002904
Daniel Vetter20afbda2012-12-11 14:05:07 +01002905 I915_WRITE(PORT_HOTPLUG_EN, 0);
2906 POSTING_READ(PORT_HOTPLUG_EN);
2907
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002908 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2909 I915_WRITE(VLV_IER, enable_mask);
2910 I915_WRITE(VLV_IIR, 0xffffffff);
2911 I915_WRITE(PIPESTAT(0), 0xffff);
2912 I915_WRITE(PIPESTAT(1), 0xffff);
2913 POSTING_READ(VLV_IER);
2914
Daniel Vetterb79480b2013-06-27 17:52:10 +02002915 /* Interrupt setup is already guaranteed to be single-threaded; this is
2916 * just to make the assert_spin_locked check happy. */
2917 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002918 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2919 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2920 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
Daniel Vetterb79480b2013-06-27 17:52:10 +02002921 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002922
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002923 I915_WRITE(VLV_IIR, 0xffffffff);
2924 I915_WRITE(VLV_IIR, 0xffffffff);
2925
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002926 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002927
2928 /* ack & enable invalid PTE error interrupts */
2929#if 0 /* FIXME: add support to irq handler for checking these bits */
2930 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2931 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2932#endif
2933
2934 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002935
2936 return 0;
2937}
2938
Ben Widawskyabd58f02013-11-02 21:07:09 -07002939static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
2940{
2941 int i;
2942
2943 /* These are interrupts we'll toggle with the ring mask register */
2944 uint32_t gt_interrupts[] = {
2945 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
2946 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
2947 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
2948 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
2949 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
2950 0,
2951 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
2952 };
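	/*
	 * Index i selects GEN8_GT_IER(i): entry 0 drives the render and
	 * blitter rings (plus L3 parity), entry 1 the two video decode
	 * rings, and entry 3 the video enhancement ring; entry 2 is left
	 * empty here.
	 */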
2953
2954 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
2955 u32 tmp = I915_READ(GEN8_GT_IIR(i));
2956 if (tmp)
2957 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2958 i, tmp);
2959 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
2960 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
2961 }
2962 POSTING_READ(GEN8_GT_IER(0));
2963}
2964
2965static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
2966{
2967 struct drm_device *dev = dev_priv->dev;
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01002968 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
2969 GEN8_PIPE_CDCLK_CRC_DONE |
2970 GEN8_PIPE_FIFO_UNDERRUN |
2971 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2972 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002973 int pipe;
Daniel Vetter13b3a0a2013-11-07 15:31:52 +01002974 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
2975 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
2976 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002977
2978 for_each_pipe(pipe) {
2979 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2980 if (tmp)
2981 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2982 pipe, tmp);
2983 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2984 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
2985 }
2986 POSTING_READ(GEN8_DE_PIPE_ISR(0));
2987
Daniel Vetter6d766f02013-11-07 14:49:55 +01002988 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
2989 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002990 POSTING_READ(GEN8_DE_PORT_IER);
2991}
2992
2993static int gen8_irq_postinstall(struct drm_device *dev)
2994{
2995 struct drm_i915_private *dev_priv = dev->dev_private;
2996
2997 gen8_gt_irq_postinstall(dev_priv);
2998 gen8_de_irq_postinstall(dev_priv);
2999
3000 ibx_irq_postinstall(dev);
3001
3002 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3003 POSTING_READ(GEN8_MASTER_IRQ);
3004
3005 return 0;
3006}
3007
3008static void gen8_irq_uninstall(struct drm_device *dev)
3009{
3010 struct drm_i915_private *dev_priv = dev->dev_private;
3011 int pipe;
3012
3013 if (!dev_priv)
3014 return;
3015
3016 atomic_set(&dev_priv->irq_received, 0);
3017
3018 I915_WRITE(GEN8_MASTER_IRQ, 0);
3019
3020#define GEN8_IRQ_FINI_NDX(type, which) do { \
3021 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3022 I915_WRITE(GEN8_##type##_IER(which), 0); \
3023 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3024 } while (0)
3025
3026#define GEN8_IRQ_FINI(type) do { \
3027 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3028 I915_WRITE(GEN8_##type##_IER, 0); \
3029 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3030 } while (0)
3031
3032 GEN8_IRQ_FINI_NDX(GT, 0);
3033 GEN8_IRQ_FINI_NDX(GT, 1);
3034 GEN8_IRQ_FINI_NDX(GT, 2);
3035 GEN8_IRQ_FINI_NDX(GT, 3);
3036
3037 for_each_pipe(pipe) {
3038 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
3039 }
3040
3041 GEN8_IRQ_FINI(DE_PORT);
3042 GEN8_IRQ_FINI(DE_MISC);
3043 GEN8_IRQ_FINI(PCU);
3044#undef GEN8_IRQ_FINI
3045#undef GEN8_IRQ_FINI_NDX
3046
3047 POSTING_READ(GEN8_PCU_IIR);
3048}
3049
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003050static void valleyview_irq_uninstall(struct drm_device *dev)
3051{
3052 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3053 int pipe;
3054
3055 if (!dev_priv)
3056 return;
3057
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003058 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003059
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003060 for_each_pipe(pipe)
3061 I915_WRITE(PIPESTAT(pipe), 0xffff);
3062
3063 I915_WRITE(HWSTAM, 0xffffffff);
3064 I915_WRITE(PORT_HOTPLUG_EN, 0);
3065 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3066 for_each_pipe(pipe)
3067 I915_WRITE(PIPESTAT(pipe), 0xffff);
3068 I915_WRITE(VLV_IIR, 0xffffffff);
3069 I915_WRITE(VLV_IMR, 0xffffffff);
3070 I915_WRITE(VLV_IER, 0x0);
3071 POSTING_READ(VLV_IER);
3072}
3073
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003074static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003075{
3076 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07003077
3078 if (!dev_priv)
3079 return;
3080
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003081 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003082
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003083 I915_WRITE(HWSTAM, 0xffffffff);
3084
3085 I915_WRITE(DEIMR, 0xffffffff);
3086 I915_WRITE(DEIER, 0x0);
3087 I915_WRITE(DEIIR, I915_READ(DEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03003088 if (IS_GEN7(dev))
3089 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003090
3091 I915_WRITE(GTIMR, 0xffffffff);
3092 I915_WRITE(GTIER, 0x0);
3093 I915_WRITE(GTIIR, I915_READ(GTIIR));
Keith Packard192aac1f2011-09-20 10:12:44 -07003094
Ben Widawskyab5c6082013-04-05 13:12:41 -07003095 if (HAS_PCH_NOP(dev))
3096 return;
3097
Keith Packard192aac1f2011-09-20 10:12:44 -07003098 I915_WRITE(SDEIMR, 0xffffffff);
3099 I915_WRITE(SDEIER, 0x0);
3100 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03003101 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3102 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08003103}
3104
Chris Wilsonc2798b12012-04-22 21:13:57 +01003105static void i8xx_irq_preinstall(struct drm_device * dev)
3106{
3107 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3108 int pipe;
3109
3110 atomic_set(&dev_priv->irq_received, 0);
3111
3112 for_each_pipe(pipe)
3113 I915_WRITE(PIPESTAT(pipe), 0);
3114 I915_WRITE16(IMR, 0xffff);
3115 I915_WRITE16(IER, 0x0);
3116 POSTING_READ16(IER);
3117}
3118
3119static int i8xx_irq_postinstall(struct drm_device *dev)
3120{
3121 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter379ef822013-10-16 22:55:56 +02003122 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003123
Chris Wilsonc2798b12012-04-22 21:13:57 +01003124 I915_WRITE16(EMR,
3125 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3126
3127 /* Unmask the interrupts that we always want on. */
3128 dev_priv->irq_mask =
3129 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3130 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3131 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3132 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3133 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3134 I915_WRITE16(IMR, dev_priv->irq_mask);
3135
3136 I915_WRITE16(IER,
3137 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3138 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3139 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3140 I915_USER_INTERRUPT);
3141 POSTING_READ16(IER);
3142
Daniel Vetter379ef822013-10-16 22:55:56 +02003143 /* Interrupt setup is already guaranteed to be single-threaded; this is
3144 * just to make the assert_spin_locked check happy. */
3145 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02003146 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3147 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
Daniel Vetter379ef822013-10-16 22:55:56 +02003148 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3149
Chris Wilsonc2798b12012-04-22 21:13:57 +01003150 return 0;
3151}
3152
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003153/*
3154 * Returns true when a page flip has completed.
3155 */
3156static bool i8xx_handle_vblank(struct drm_device *dev,
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003157 int plane, int pipe, u32 iir)
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003158{
3159 drm_i915_private_t *dev_priv = dev->dev_private;
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003160 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003161
3162 if (!drm_handle_vblank(dev, pipe))
3163 return false;
3164
3165 if ((iir & flip_pending) == 0)
3166 return false;
3167
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003168 intel_prepare_page_flip(dev, plane);
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003169
3170 /* We detect FlipDone by looking for the change in PendingFlip from '1'
 3171 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3172 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3173 * the flip is completed (no longer pending). Since this doesn't raise
3174 * an interrupt per se, we watch for the change at vblank.
3175 */
3176 if (I915_READ16(ISR) & flip_pending)
3177 return false;
3178
3179 intel_finish_page_flip(dev, pipe);
3180
3181 return true;
3182}
3183
Daniel Vetterff1f5252012-10-02 15:10:55 +02003184static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003185{
3186 struct drm_device *dev = (struct drm_device *) arg;
3187 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003188 u16 iir, new_iir;
3189 u32 pipe_stats[2];
3190 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003191 int pipe;
3192 u16 flip_mask =
3193 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3194 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3195
3196 atomic_inc(&dev_priv->irq_received);
3197
3198 iir = I915_READ16(IIR);
3199 if (iir == 0)
3200 return IRQ_NONE;
3201
3202 while (iir & ~flip_mask) {
3203 /* Can't rely on pipestat interrupt bit in iir as it might
3204 * have been cleared after the pipestat interrupt was received.
3205 * It doesn't set the bit in iir again, but it still produces
3206 * interrupts (for non-MSI).
3207 */
3208 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3209 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3210 i915_handle_error(dev, false);
3211
3212 for_each_pipe(pipe) {
3213 int reg = PIPESTAT(pipe);
3214 pipe_stats[pipe] = I915_READ(reg);
3215
3216 /*
3217 * Clear the PIPE*STAT regs before the IIR
3218 */
3219 if (pipe_stats[pipe] & 0x8000ffff) {
3220 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3221 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3222 pipe_name(pipe));
3223 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003224 }
3225 }
3226 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3227
3228 I915_WRITE16(IIR, iir & ~flip_mask);
3229 new_iir = I915_READ16(IIR); /* Flush posted writes */
3230
Daniel Vetterd05c6172012-04-26 23:28:09 +02003231 i915_update_dri1_breadcrumb(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003232
3233 if (iir & I915_USER_INTERRUPT)
3234 notify_ring(dev, &dev_priv->ring[RCS]);
3235
Daniel Vetter4356d582013-10-16 22:55:55 +02003236 for_each_pipe(pipe) {
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003237 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003238 if (HAS_FBC(dev))
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003239 plane = !plane;
3240
Daniel Vetter4356d582013-10-16 22:55:55 +02003241 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä1f1c2e22013-11-28 17:30:01 +02003242 i8xx_handle_vblank(dev, plane, pipe, iir))
3243 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003244
Daniel Vetter4356d582013-10-16 22:55:55 +02003245 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003246 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter4356d582013-10-16 22:55:55 +02003247 }
Chris Wilsonc2798b12012-04-22 21:13:57 +01003248
3249 iir = new_iir;
3250 }
3251
3252 return IRQ_HANDLED;
3253}
3254
3255static void i8xx_irq_uninstall(struct drm_device * dev)
3256{
3257 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3258 int pipe;
3259
Chris Wilsonc2798b12012-04-22 21:13:57 +01003260 for_each_pipe(pipe) {
3261 /* Clear enable bits; then clear status bits */
3262 I915_WRITE(PIPESTAT(pipe), 0);
3263 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3264 }
3265 I915_WRITE16(IMR, 0xffff);
3266 I915_WRITE16(IER, 0x0);
3267 I915_WRITE16(IIR, I915_READ16(IIR));
3268}
3269
Chris Wilsona266c7d2012-04-24 22:59:44 +01003270static void i915_irq_preinstall(struct drm_device * dev)
3271{
3272 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3273 int pipe;
3274
3275 atomic_set(&dev_priv->irq_received, 0);
3276
3277 if (I915_HAS_HOTPLUG(dev)) {
3278 I915_WRITE(PORT_HOTPLUG_EN, 0);
3279 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3280 }
3281
Chris Wilson00d98eb2012-04-24 22:59:48 +01003282 I915_WRITE16(HWSTAM, 0xeffe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003283 for_each_pipe(pipe)
3284 I915_WRITE(PIPESTAT(pipe), 0);
3285 I915_WRITE(IMR, 0xffffffff);
3286 I915_WRITE(IER, 0x0);
3287 POSTING_READ(IER);
3288}
3289
3290static int i915_irq_postinstall(struct drm_device *dev)
3291{
3292 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01003293 u32 enable_mask;
Daniel Vetter379ef822013-10-16 22:55:56 +02003294 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003295
Chris Wilson38bde182012-04-24 22:59:50 +01003296 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3297
3298 /* Unmask the interrupts that we always want on. */
3299 dev_priv->irq_mask =
3300 ~(I915_ASLE_INTERRUPT |
3301 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3302 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3303 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3304 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3305 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3306
3307 enable_mask =
3308 I915_ASLE_INTERRUPT |
3309 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3310 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3311 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3312 I915_USER_INTERRUPT;
3313
Chris Wilsona266c7d2012-04-24 22:59:44 +01003314 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01003315 I915_WRITE(PORT_HOTPLUG_EN, 0);
3316 POSTING_READ(PORT_HOTPLUG_EN);
3317
Chris Wilsona266c7d2012-04-24 22:59:44 +01003318 /* Enable in IER... */
3319 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3320 /* and unmask in IMR */
3321 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3322 }
3323
Chris Wilsona266c7d2012-04-24 22:59:44 +01003324 I915_WRITE(IMR, dev_priv->irq_mask);
3325 I915_WRITE(IER, enable_mask);
3326 POSTING_READ(IER);
3327
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003328 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003329
Daniel Vetter379ef822013-10-16 22:55:56 +02003330 /* Interrupt setup is already guaranteed to be single-threaded; this is
3331 * just to make the assert_spin_locked check happy. */
3332 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02003333 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3334 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
Daniel Vetter379ef822013-10-16 22:55:56 +02003335 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3336
Daniel Vetter20afbda2012-12-11 14:05:07 +01003337 return 0;
3338}
3339
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003340/*
3341 * Returns true when a page flip has completed.
3342 */
3343static bool i915_handle_vblank(struct drm_device *dev,
3344 int plane, int pipe, u32 iir)
3345{
3346 drm_i915_private_t *dev_priv = dev->dev_private;
3347 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3348
3349 if (!drm_handle_vblank(dev, pipe))
3350 return false;
3351
3352 if ((iir & flip_pending) == 0)
3353 return false;
3354
3355 intel_prepare_page_flip(dev, plane);
3356
3357 /* We detect FlipDone by looking for the change in PendingFlip from '1'
 3358 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3359 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3360 * the flip is completed (no longer pending). Since this doesn't raise
3361 * an interrupt per se, we watch for the change at vblank.
3362 */
3363 if (I915_READ(ISR) & flip_pending)
3364 return false;
3365
3366 intel_finish_page_flip(dev, pipe);
3367
3368 return true;
3369}
3370
Daniel Vetterff1f5252012-10-02 15:10:55 +02003371static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003372{
3373 struct drm_device *dev = (struct drm_device *) arg;
3374 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01003375 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003376 unsigned long irqflags;
Chris Wilson38bde182012-04-24 22:59:50 +01003377 u32 flip_mask =
3378 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3379 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01003380 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003381
3382 atomic_inc(&dev_priv->irq_received);
3383
3384 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01003385 do {
3386 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01003387 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003388
3389 /* Can't rely on pipestat interrupt bit in iir as it might
3390 * have been cleared after the pipestat interrupt was received.
3391 * It doesn't set the bit in iir again, but it still produces
3392 * interrupts (for non-MSI).
3393 */
3394 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3395 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3396 i915_handle_error(dev, false);
3397
3398 for_each_pipe(pipe) {
3399 int reg = PIPESTAT(pipe);
3400 pipe_stats[pipe] = I915_READ(reg);
3401
Chris Wilson38bde182012-04-24 22:59:50 +01003402 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003403 if (pipe_stats[pipe] & 0x8000ffff) {
3404 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3405 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3406 pipe_name(pipe));
3407 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01003408 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003409 }
3410 }
3411 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3412
3413 if (!irq_received)
3414 break;
3415
Chris Wilsona266c7d2012-04-24 22:59:44 +01003416 /* Consume port. Then clear IIR or we'll miss events */
3417 if ((I915_HAS_HOTPLUG(dev)) &&
3418 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3419 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02003420 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003421
Daniel Vetter91d131d2013-06-27 17:52:14 +02003422 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3423
Chris Wilsona266c7d2012-04-24 22:59:44 +01003424 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
Chris Wilson38bde182012-04-24 22:59:50 +01003425 POSTING_READ(PORT_HOTPLUG_STAT);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003426 }
3427
Chris Wilson38bde182012-04-24 22:59:50 +01003428 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003429 new_iir = I915_READ(IIR); /* Flush posted writes */
3430
Chris Wilsona266c7d2012-04-24 22:59:44 +01003431 if (iir & I915_USER_INTERRUPT)
3432 notify_ring(dev, &dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003433
Chris Wilsona266c7d2012-04-24 22:59:44 +01003434 for_each_pipe(pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01003435 int plane = pipe;
Daniel Vetter3a77c4c2014-01-10 08:50:12 +01003436 if (HAS_FBC(dev))
Chris Wilson38bde182012-04-24 22:59:50 +01003437 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02003438
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003439 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3440 i915_handle_vblank(dev, plane, pipe, iir))
3441 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003442
3443 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3444 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003445
3446 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003447 i9xx_pipe_crc_irq_handler(dev, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003448 }
3449
Chris Wilsona266c7d2012-04-24 22:59:44 +01003450 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3451 intel_opregion_asle_intr(dev);
3452
3453 /* With MSI, interrupts are only generated when iir
3454 * transitions from zero to nonzero. If another bit got
3455 * set while we were handling the existing iir bits, then
3456 * we would never get another interrupt.
3457 *
3458 * This is fine on non-MSI as well, as if we hit this path
3459 * we avoid exiting the interrupt handler only to generate
3460 * another one.
3461 *
3462 * Note that for MSI this could cause a stray interrupt report
3463 * if an interrupt landed in the time between writing IIR and
3464 * the posting read. This should be rare enough to never
3465 * trigger the 99% of 100,000 interrupts test for disabling
3466 * stray interrupts.
3467 */
Chris Wilson38bde182012-04-24 22:59:50 +01003468 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003469 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003470 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003471
Daniel Vetterd05c6172012-04-26 23:28:09 +02003472 i915_update_dri1_breadcrumb(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01003473
Chris Wilsona266c7d2012-04-24 22:59:44 +01003474 return ret;
3475}
3476
3477static void i915_irq_uninstall(struct drm_device * dev)
3478{
3479 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3480 int pipe;
3481
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003482 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003483
Chris Wilsona266c7d2012-04-24 22:59:44 +01003484 if (I915_HAS_HOTPLUG(dev)) {
3485 I915_WRITE(PORT_HOTPLUG_EN, 0);
3486 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3487 }
3488
Chris Wilson00d98eb2012-04-24 22:59:48 +01003489 I915_WRITE16(HWSTAM, 0xffff);
Chris Wilson55b39752012-04-24 22:59:49 +01003490 for_each_pipe(pipe) {
3491 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003492 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003493 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3494 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003495 I915_WRITE(IMR, 0xffffffff);
3496 I915_WRITE(IER, 0x0);
3497
Chris Wilsona266c7d2012-04-24 22:59:44 +01003498 I915_WRITE(IIR, I915_READ(IIR));
3499}
3500
3501static void i965_irq_preinstall(struct drm_device * dev)
3502{
3503 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3504 int pipe;
3505
3506 atomic_set(&dev_priv->irq_received, 0);
3507
Chris Wilsonadca4732012-05-11 18:01:31 +01003508 I915_WRITE(PORT_HOTPLUG_EN, 0);
3509 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003510
3511 I915_WRITE(HWSTAM, 0xeffe);
3512 for_each_pipe(pipe)
3513 I915_WRITE(PIPESTAT(pipe), 0);
3514 I915_WRITE(IMR, 0xffffffff);
3515 I915_WRITE(IER, 0x0);
3516 POSTING_READ(IER);
3517}
3518
3519static int i965_irq_postinstall(struct drm_device *dev)
3520{
3521 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003522 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003523 u32 error_mask;
Daniel Vetterb79480b2013-06-27 17:52:10 +02003524 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003525
Chris Wilsona266c7d2012-04-24 22:59:44 +01003526 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003527 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01003528 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003529 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3530 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3531 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3532 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3533 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3534
3535 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003536 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3537 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003538 enable_mask |= I915_USER_INTERRUPT;
3539
3540 if (IS_G4X(dev))
3541 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003542
Daniel Vetterb79480b2013-06-27 17:52:10 +02003543 /* Interrupt setup is already guaranteed to be single-threaded; this is
3544 * just to make the assert_spin_locked check happy. */
3545 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02003546 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3547 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3548 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
Daniel Vetterb79480b2013-06-27 17:52:10 +02003549 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003550
Chris Wilsona266c7d2012-04-24 22:59:44 +01003551 /*
3552 * Enable some error detection, note the instruction error mask
3553 * bit is reserved, so we leave it masked.
3554 */
3555 if (IS_G4X(dev)) {
3556 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3557 GM45_ERROR_MEM_PRIV |
3558 GM45_ERROR_CP_PRIV |
3559 I915_ERROR_MEMORY_REFRESH);
3560 } else {
3561 error_mask = ~(I915_ERROR_PAGE_TABLE |
3562 I915_ERROR_MEMORY_REFRESH);
3563 }
3564 I915_WRITE(EMR, error_mask);
3565
3566 I915_WRITE(IMR, dev_priv->irq_mask);
3567 I915_WRITE(IER, enable_mask);
3568 POSTING_READ(IER);
3569
Daniel Vetter20afbda2012-12-11 14:05:07 +01003570 I915_WRITE(PORT_HOTPLUG_EN, 0);
3571 POSTING_READ(PORT_HOTPLUG_EN);
3572
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003573 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003574
3575 return 0;
3576}
3577
Egbert Eichbac56d52013-02-25 12:06:51 -05003578static void i915_hpd_irq_setup(struct drm_device *dev)
Daniel Vetter20afbda2012-12-11 14:05:07 +01003579{
3580 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Egbert Eiche5868a32013-02-28 04:17:12 -05003581 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichcd569ae2013-04-16 13:36:57 +02003582 struct intel_encoder *intel_encoder;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003583 u32 hotplug_en;
3584
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003585 assert_spin_locked(&dev_priv->irq_lock);
3586
Egbert Eichbac56d52013-02-25 12:06:51 -05003587 if (I915_HAS_HOTPLUG(dev)) {
3588 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3589 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3590 /* Note HDMI and DP share hotplug bits */
Egbert Eiche5868a32013-02-28 04:17:12 -05003591 /* enable bits are the same for all generations */
Egbert Eichcd569ae2013-04-16 13:36:57 +02003592 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3593 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3594 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
Egbert Eichbac56d52013-02-25 12:06:51 -05003595 /* Programming the CRT detection parameters tends
 3596 * to generate a spurious hotplug event about three
 3597 * seconds later. So just do it once.
 3598 */
3599 if (IS_G4X(dev))
3600 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
Daniel Vetter85fc95b2013-03-27 15:47:11 +01003601 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
Egbert Eichbac56d52013-02-25 12:06:51 -05003602 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003603
Egbert Eichbac56d52013-02-25 12:06:51 -05003604 /* Ignore TV since it's buggy */
3605 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3606 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003607}
3608
Daniel Vetterff1f5252012-10-02 15:10:55 +02003609static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003610{
3611 struct drm_device *dev = (struct drm_device *) arg;
3612 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003613 u32 iir, new_iir;
3614 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003615 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003616 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003617 u32 flip_mask =
3618 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3619 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003620
3621 atomic_inc(&dev_priv->irq_received);
3622
3623 iir = I915_READ(IIR);
3624
Chris Wilsona266c7d2012-04-24 22:59:44 +01003625 for (;;) {
Ville Syrjälä501e01d2014-01-17 11:35:15 +02003626 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson2c8ba292012-04-24 22:59:46 +01003627 bool blc_event = false;
3628
Chris Wilsona266c7d2012-04-24 22:59:44 +01003629 /* Can't rely on pipestat interrupt bit in iir as it might
3630 * have been cleared after the pipestat interrupt was received.
3631 * It doesn't set the bit in iir again, but it still produces
3632 * interrupts (for non-MSI).
3633 */
3634 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3635 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3636 i915_handle_error(dev, false);
3637
3638 for_each_pipe(pipe) {
3639 int reg = PIPESTAT(pipe);
3640 pipe_stats[pipe] = I915_READ(reg);
3641
3642 /*
3643 * Clear the PIPE*STAT regs before the IIR
3644 */
3645 if (pipe_stats[pipe] & 0x8000ffff) {
3646 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3647 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3648 pipe_name(pipe));
3649 I915_WRITE(reg, pipe_stats[pipe]);
Ville Syrjälä501e01d2014-01-17 11:35:15 +02003650 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003651 }
3652 }
3653 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3654
3655 if (!irq_received)
3656 break;
3657
3658 ret = IRQ_HANDLED;
3659
3660 /* Consume port. Then clear IIR or we'll miss events */
Chris Wilsonadca4732012-05-11 18:01:31 +01003661 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003662 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02003663 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3664 HOTPLUG_INT_STATUS_G4X :
Daniel Vetter4f7fd702013-06-24 21:33:28 +02003665 HOTPLUG_INT_STATUS_I915);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003666
Daniel Vetter91d131d2013-06-27 17:52:14 +02003667 intel_hpd_irq_handler(dev, hotplug_trigger,
Daniel Vetter704cfb82013-12-18 09:08:43 +01003668 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
Daniel Vetter91d131d2013-06-27 17:52:14 +02003669
Daniel Vetter4aeebd72013-10-31 09:53:36 +01003670 if (IS_G4X(dev) &&
3671 (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
3672 dp_aux_irq_handler(dev);
3673
Chris Wilsona266c7d2012-04-24 22:59:44 +01003674 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3675 I915_READ(PORT_HOTPLUG_STAT);
3676 }
3677
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003678 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003679 new_iir = I915_READ(IIR); /* Flush posted writes */
3680
Chris Wilsona266c7d2012-04-24 22:59:44 +01003681 if (iir & I915_USER_INTERRUPT)
3682 notify_ring(dev, &dev_priv->ring[RCS]);
3683 if (iir & I915_BSD_USER_INTERRUPT)
3684 notify_ring(dev, &dev_priv->ring[VCS]);
3685
Chris Wilsona266c7d2012-04-24 22:59:44 +01003686 for_each_pipe(pipe) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01003687 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003688 i915_handle_vblank(dev, pipe, pipe, iir))
3689 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003690
3691 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3692 blc_event = true;
Daniel Vetter4356d582013-10-16 22:55:55 +02003693
3694 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02003695 i9xx_pipe_crc_irq_handler(dev, pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003696 }
3697
3698
3699 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3700 intel_opregion_asle_intr(dev);
3701
Daniel Vetter515ac2b2012-12-01 13:53:44 +01003702 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3703 gmbus_irq_handler(dev);
3704
Chris Wilsona266c7d2012-04-24 22:59:44 +01003705 /* With MSI, interrupts are only generated when iir
3706 * transitions from zero to nonzero. If another bit got
3707 * set while we were handling the existing iir bits, then
3708 * we would never get another interrupt.
3709 *
3710 * This is fine on non-MSI as well, as if we hit this path
3711 * we avoid exiting the interrupt handler only to generate
3712 * another one.
3713 *
3714 * Note that for MSI this could cause a stray interrupt report
3715 * if an interrupt landed in the time between writing IIR and
3716 * the posting read. This should be rare enough to never
3717 * trigger the 99% of 100,000 interrupts test for disabling
3718 * stray interrupts.
3719 */
3720 iir = new_iir;
3721 }
3722
Daniel Vetterd05c6172012-04-26 23:28:09 +02003723 i915_update_dri1_breadcrumb(dev);
Chris Wilson2c8ba292012-04-24 22:59:46 +01003724
Chris Wilsona266c7d2012-04-24 22:59:44 +01003725 return ret;
3726}
3727
3728static void i965_irq_uninstall(struct drm_device * dev)
3729{
3730 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3731 int pipe;
3732
3733 if (!dev_priv)
3734 return;
3735
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003736 intel_hpd_irq_uninstall(dev_priv);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003737
Chris Wilsonadca4732012-05-11 18:01:31 +01003738 I915_WRITE(PORT_HOTPLUG_EN, 0);
3739 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003740
3741 I915_WRITE(HWSTAM, 0xffffffff);
3742 for_each_pipe(pipe)
3743 I915_WRITE(PIPESTAT(pipe), 0);
3744 I915_WRITE(IMR, 0xffffffff);
3745 I915_WRITE(IER, 0x0);
3746
3747 for_each_pipe(pipe)
3748 I915_WRITE(PIPESTAT(pipe),
3749 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3750 I915_WRITE(IIR, I915_READ(IIR));
3751}
3752
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003753static void intel_hpd_irq_reenable(unsigned long data)
Egbert Eichac4c16c2013-04-16 13:36:58 +02003754{
3755 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3756 struct drm_device *dev = dev_priv->dev;
3757 struct drm_mode_config *mode_config = &dev->mode_config;
3758 unsigned long irqflags;
3759 int i;
3760
3761 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3762 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3763 struct drm_connector *connector;
3764
3765 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3766 continue;
3767
3768 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3769
3770 list_for_each_entry(connector, &mode_config->connector_list, head) {
3771 struct intel_connector *intel_connector = to_intel_connector(connector);
3772
3773 if (intel_connector->encoder->hpd_pin == i) {
3774 if (connector->polled != intel_connector->polled)
3775 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3776 drm_get_connector_name(connector));
3777 connector->polled = intel_connector->polled;
3778 if (!connector->polled)
3779 connector->polled = DRM_CONNECTOR_POLL_HPD;
3780 }
3781 }
3782 }
3783 if (dev_priv->display.hpd_irq_setup)
3784 dev_priv->display.hpd_irq_setup(dev);
3785 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3786}
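
/*
 * Hedged sketch of the other half of the storm dance: when the IRQ path
 * (intel_hpd_irq_handler, earlier in this file) decides a pin is storming,
 * it marks the pin disabled and arms the one-shot timer that fires
 * intel_hpd_irq_reenable above.  example_mark_storm is a made-up helper,
 * the 2-minute delay is illustrative, and the real driver hands off via an
 * intermediate HPD_MARK_DISABLED state rather than going straight to
 * HPD_DISABLED.
 */
static void example_mark_storm(struct drm_i915_private *dev_priv, int pin)
{
	assert_spin_locked(&dev_priv->irq_lock);

	dev_priv->hpd_stats[pin].hpd_mark = HPD_DISABLED;
	mod_timer(&dev_priv->hotplug_reenable_timer,
		  jiffies + msecs_to_jiffies(2 * 60 * 1000));
}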
3787
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003788void intel_irq_init(struct drm_device *dev)
3789{
Chris Wilson8b2e3262012-04-24 22:59:41 +01003790 struct drm_i915_private *dev_priv = dev->dev_private;
3791
3792 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01003793 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003794 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01003795 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01003796
Daniel Vetter99584db2012-11-14 17:14:04 +01003797 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3798 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01003799 (unsigned long) dev);
Ville Syrjälä3ca1cce2014-01-17 13:43:51 +02003800 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
Egbert Eichac4c16c2013-04-16 13:36:58 +02003801 (unsigned long) dev_priv);
Daniel Vetter61bac782012-12-01 21:03:21 +01003802
Tomas Janousek97a19a22012-12-08 13:48:13 +01003803 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01003804
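	/*
	 * Pick the hardware frame counter: gen2 has no usable counter (the
	 * i8xx stub always returns zero), g4x and gen5+ expose a full
	 * 32-bit counter, and everything in between reports only 24 bits.
	 */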
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +03003805 if (IS_GEN2(dev)) {
3806 dev->max_vblank_count = 0;
3807 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3808 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003809 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3810 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
Ville Syrjälä391f75e2013-09-25 19:55:26 +03003811 } else {
3812 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3813 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003814 }
3815
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03003816 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
Keith Packardc3613de2011-08-12 17:05:54 -07003817 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03003818 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3819 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003820
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003821 if (IS_VALLEYVIEW(dev)) {
3822 dev->driver->irq_handler = valleyview_irq_handler;
3823 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3824 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3825 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3826 dev->driver->enable_vblank = valleyview_enable_vblank;
3827 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05003828 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003829 } else if (IS_GEN8(dev)) {
3830 dev->driver->irq_handler = gen8_irq_handler;
3831 dev->driver->irq_preinstall = gen8_irq_preinstall;
3832 dev->driver->irq_postinstall = gen8_irq_postinstall;
3833 dev->driver->irq_uninstall = gen8_irq_uninstall;
3834 dev->driver->enable_vblank = gen8_enable_vblank;
3835 dev->driver->disable_vblank = gen8_disable_vblank;
3836 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003837 } else if (HAS_PCH_SPLIT(dev)) {
3838 dev->driver->irq_handler = ironlake_irq_handler;
3839 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3840 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3841 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3842 dev->driver->enable_vblank = ironlake_enable_vblank;
3843 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003844 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003845 } else {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003846 if (INTEL_INFO(dev)->gen == 2) {
3847 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3848 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3849 dev->driver->irq_handler = i8xx_irq_handler;
3850 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003851 } else if (INTEL_INFO(dev)->gen == 3) {
3852 dev->driver->irq_preinstall = i915_irq_preinstall;
3853 dev->driver->irq_postinstall = i915_irq_postinstall;
3854 dev->driver->irq_uninstall = i915_irq_uninstall;
3855 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003856 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003857 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003858 dev->driver->irq_preinstall = i965_irq_preinstall;
3859 dev->driver->irq_postinstall = i965_irq_postinstall;
3860 dev->driver->irq_uninstall = i965_irq_uninstall;
3861 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05003862 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003863 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003864 dev->driver->enable_vblank = i915_enable_vblank;
3865 dev->driver->disable_vblank = i915_disable_vblank;
3866 }
3867}
Daniel Vetter20afbda2012-12-11 14:05:07 +01003868
3869void intel_hpd_init(struct drm_device *dev)
3870{
3871 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eich821450c2013-04-16 13:36:55 +02003872 struct drm_mode_config *mode_config = &dev->mode_config;
3873 struct drm_connector *connector;
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003874 unsigned long irqflags;
Egbert Eich821450c2013-04-16 13:36:55 +02003875 int i;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003876
Egbert Eich821450c2013-04-16 13:36:55 +02003877 for (i = 1; i < HPD_NUM_PINS; i++) {
3878 dev_priv->hpd_stats[i].hpd_cnt = 0;
3879 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3880 }
3881 list_for_each_entry(connector, &mode_config->connector_list, head) {
3882 struct intel_connector *intel_connector = to_intel_connector(connector);
3883 connector->polled = intel_connector->polled;
3884 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3885 connector->polled = DRM_CONNECTOR_POLL_HPD;
3886 }
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003887
3888	/* Interrupt setup is already guaranteed to be single-threaded; this
3889	 * is just to make the assert_spin_locked checks happy. */
3890 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003891 if (dev_priv->display.hpd_irq_setup)
3892 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003893 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003894}
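
/*
 * Minimal sketch of the locking convention the comment in intel_hpd_init
 * refers to: helpers that program interrupt registers assert irq_lock, so
 * even a single-threaded setup path has to take the lock before calling
 * them.  example_hpd_setup_helper is a made-up name for illustration.
 */
static void example_hpd_setup_helper(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* ... program PORT_HOTPLUG_EN / SDEIMR here ... */
}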
Paulo Zanonic67a4702013-08-19 13:18:09 -03003895
3896/* Disable interrupts so we can allow Package C8+. */
3897void hsw_pc8_disable_interrupts(struct drm_device *dev)
3898{
3899 struct drm_i915_private *dev_priv = dev->dev_private;
3900 unsigned long irqflags;
3901
3902 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3903
3904 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3905 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3906 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3907 dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3908 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3909
Paulo Zanoni1f2d4532013-11-21 13:47:25 -02003910 ironlake_disable_display_irq(dev_priv, 0xffffffff);
3911 ibx_disable_display_interrupt(dev_priv, 0xffffffff);
Paulo Zanonic67a4702013-08-19 13:18:09 -03003912 ilk_disable_gt_irq(dev_priv, 0xffffffff);
3913 snb_disable_pm_irq(dev_priv, 0xffffffff);
3914
3915 dev_priv->pc8.irqs_disabled = true;
3916
3917 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3918}
3919
3920/* Restore interrupts so we can recover from Package C8+. */
3921void hsw_pc8_restore_interrupts(struct drm_device *dev)
3922{
3923 struct drm_i915_private *dev_priv = dev->dev_private;
3924 unsigned long irqflags;
Paulo Zanoni1f2d4532013-11-21 13:47:25 -02003925 uint32_t val;
Paulo Zanonic67a4702013-08-19 13:18:09 -03003926
3927 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3928
3929 val = I915_READ(DEIMR);
Paulo Zanoni1f2d4532013-11-21 13:47:25 -02003930 WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
Paulo Zanonic67a4702013-08-19 13:18:09 -03003931
Paulo Zanoni1f2d4532013-11-21 13:47:25 -02003932 val = I915_READ(SDEIMR);
3933 WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
Paulo Zanonic67a4702013-08-19 13:18:09 -03003934
3935 val = I915_READ(GTIMR);
Paulo Zanoni1f2d4532013-11-21 13:47:25 -02003936 WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
Paulo Zanonic67a4702013-08-19 13:18:09 -03003937
3938 val = I915_READ(GEN6_PMIMR);
Paulo Zanoni1f2d4532013-11-21 13:47:25 -02003939 WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
Paulo Zanonic67a4702013-08-19 13:18:09 -03003940
3941 dev_priv->pc8.irqs_disabled = false;
3942
3943 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
Paulo Zanoni1f2d4532013-11-21 13:47:25 -02003944 ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
Paulo Zanonic67a4702013-08-19 13:18:09 -03003945 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3946 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3947 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3948
3949 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3950}
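
/*
 * Hedged usage sketch: the two functions above are meant to be called as a
 * strict pair by the Haswell runtime power code (the exact call sites live
 * outside this file), roughly:
 *
 *	hsw_pc8_disable_interrupts(dev);    save the IMRs, mask everything
 *	... package C8+ residency ...
 *	hsw_pc8_restore_interrupts(dev);    verify still masked, then restore
 *
 * The WARNs in the restore path catch anything that re-enabled interrupts
 * behind the PC8 code's back while they were supposed to stay masked.
 */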