/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

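/*
 * Lookup tables translating HPD pin numbers (enum hpd_pin) into the
 * platform-specific hotplug interrupt enable/status bits. Which table
 * applies depends on the PCH (IBX vs. CPT) or, for pre-PCH platforms,
 * on the GMCH generation.
 */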
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

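/*
 * IVB/HSW have a single DE_ERR_INT_IVB enable bit shared by all pipes
 * (and CPT/PPT a single SDE_ERROR_CPT bit shared by all transcoders),
 * so the error interrupt may only be enabled while no pipe/transcoder
 * has underrun reporting disabled. These helpers check that condition.
 */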
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here, using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

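/*
 * PIPESTAT registers keep the interrupt enable bits in the high half and
 * the corresponding status bits in the low half; "mask >> 16" below maps
 * an enable bit onto its status bit, so enabling an interrupt also clears
 * any status that is still pending.
 */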
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return ((high1 << 8) | low) + (pixel >= vbl_start);
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;
	int reg;

	if (IS_VALLEYVIEW(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = VLV_ISR;
	} else if (IS_GEN2(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = ISR;
	} else if (INTEL_INFO(dev)->gen < 5) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = ISR;
	} else if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;

		reg = DEISR;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}

		reg = DEISR;
	}

	if (IS_GEN2(dev))
		return __raw_i915_read16(dev_priv, reg) & status;
	else
		return __raw_i915_read32(dev_priv, reg) & status;
}

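/*
 * Query the current scanout position: vertical position in *vpos and,
 * where a pixel counter is available, horizontal position in *hpos.
 * Optional system timestamps taken immediately before and after the
 * register reads are returned in *stime and *etime. The return value
 * is a mask of DRM_SCANOUTPOS_* flags (VALID, ACCURATE, INVBL).
 */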
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		/*
		 * The scanline counter increments at the leading edge
		 * of hsync, ie. it completely misses the active portion
		 * of the line. Fix up the counter at both edges of vblank
		 * to get a more accurate picture whether we're in vblank
		 * or not.
		 */
		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
		if ((in_vbl && position == vbl_start - 1) ||
		    (!in_vbl && position == vbl_end - 1))
			position = (position + 1) % vtotal;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

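/*
 * Re-run ->detect() on a connector after a hotplug interrupt and report
 * whether its status actually changed; the caller uses this to decide
 * whether a hotplug event needs to be sent to userspace. Must be called
 * with mode_config.mutex held (enforced by the WARN_ON below).
 */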
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

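/*
 * On MEMINT_EVAL_CHG the handler compares the hardware's up/down
 * busyness averages (RCPREVBSYTUPAVG/RCPREVBSYTDNAVG) against the
 * RCBMAXAVG/RCBMINAVG thresholds and steps ips.cur_delay one unit
 * toward ips.max_delay (busier) or ips.min_delay (idler).
 */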
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

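/*
 * Deferred RPS interrupt handling, run from dev_priv->rps.work: grab
 * and clear the accumulated rps.pm_iir under irq_lock, re-enable the
 * RPS interrupts in PMIMR, then pick a new frequency. The step size
 * (rps.last_adj) doubles on consecutive events in the same direction;
 * an up event below RPe jumps straight to RPe, and a down-timeout
 * falls back to RPe (or the minimum if already at or below it).
 */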
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay < (int)dev_priv->rps.min_delay)
		new_delay = dev_priv->rps.min_delay;
	if (new_delay > (int)dev_priv->rps.max_delay)
		new_delay = dev_priv->rps.max_delay;
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

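/*
 * Gen8 fans the GT interrupts out over several GEN8_GT_IIR register
 * banks; the master control register tells us which banks need
 * servicing, and each bank packs its per-ring bits at a ring-specific
 * shift.
 */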
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

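/*
 * HPD storm detection: a pin that fires more than HPD_STORM_THRESHOLD
 * times within HPD_STORM_DETECT_PERIOD ms is declared stormy; its
 * interrupt is masked off via ->hpd_irq_setup() and the connector falls
 * back to polling until the hotplug_reenable_timer (armed in
 * i915_hotplug_work_func above) restores it.
 */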
Egbert Eichb543fb02013-04-16 13:36:54 +02001221#define HPD_STORM_DETECT_PERIOD 1000
1222#define HPD_STORM_THRESHOLD 5
1223
Daniel Vetter10a504d2013-06-27 17:52:12 +02001224static inline void intel_hpd_irq_handler(struct drm_device *dev,
Daniel Vetter22062db2013-06-27 17:52:11 +02001225 u32 hotplug_trigger,
1226 const u32 *hpd)
Egbert Eichb543fb02013-04-16 13:36:54 +02001227{
1228 drm_i915_private_t *dev_priv = dev->dev_private;
Egbert Eichb543fb02013-04-16 13:36:54 +02001229 int i;
Daniel Vetter10a504d2013-06-27 17:52:12 +02001230 bool storm_detected = false;
Egbert Eichb543fb02013-04-16 13:36:54 +02001231
Daniel Vetter91d131d2013-06-27 17:52:14 +02001232 if (!hotplug_trigger)
1233 return;
1234
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001235 spin_lock(&dev_priv->irq_lock);
Egbert Eichb543fb02013-04-16 13:36:54 +02001236 for (i = 1; i < HPD_NUM_PINS; i++) {
Egbert Eich821450c2013-04-16 13:36:55 +02001237
Egbert Eichb8f102e2013-07-26 14:14:24 +02001238 WARN(((hpd[i] & hotplug_trigger) &&
1239 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
1240 "Received HPD interrupt although disabled\n");
1241
Egbert Eichb543fb02013-04-16 13:36:54 +02001242 if (!(hpd[i] & hotplug_trigger) ||
1243 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1244 continue;
1245
Jani Nikulabc5ead8c2013-05-07 15:10:29 +03001246 dev_priv->hpd_event_bits |= (1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001247 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1248 dev_priv->hpd_stats[i].hpd_last_jiffies
1249 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1250 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1251 dev_priv->hpd_stats[i].hpd_cnt = 0;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001252 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001253 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1254 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
Egbert Eich142e2392013-04-11 15:57:57 +02001255 dev_priv->hpd_event_bits &= ~(1 << i);
Egbert Eichb543fb02013-04-16 13:36:54 +02001256 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
Daniel Vetter10a504d2013-06-27 17:52:12 +02001257 storm_detected = true;
Egbert Eichb543fb02013-04-16 13:36:54 +02001258 } else {
1259 dev_priv->hpd_stats[i].hpd_cnt++;
Egbert Eichb8f102e2013-07-26 14:14:24 +02001260 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1261 dev_priv->hpd_stats[i].hpd_cnt);
Egbert Eichb543fb02013-04-16 13:36:54 +02001262 }
1263 }
1264
Daniel Vetter10a504d2013-06-27 17:52:12 +02001265 if (storm_detected)
1266 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02001267 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter5876fa02013-06-27 17:52:13 +02001268
Daniel Vetter645416f2013-09-02 16:22:25 +02001269 /*
1270 * Our hotplug handler can grab modeset locks (by calling down into the
1271 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1272 * queue, because otherwise the flush_work in the pageflip code will
1273 * deadlock.
1274 */
1275 schedule_work(&dev_priv->hotplug_work);
Egbert Eichb543fb02013-04-16 13:36:54 +02001276}
1277
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001278static void gmbus_irq_handler(struct drm_device *dev)
1279{
Daniel Vetter28c70f12012-12-01 13:53:45 +01001280 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1281
Daniel Vetter28c70f12012-12-01 13:53:45 +01001282 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001283}
1284
Daniel Vetterce99c252012-12-01 13:53:47 +01001285static void dp_aux_irq_handler(struct drm_device *dev)
1286{
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001287 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1288
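	/* DP AUX completion waiters sleep on the shared gmbus wait queue. */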
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001289 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetterce99c252012-12-01 13:53:47 +01001290}
1291
Shuang He8bf1e9f2013-10-15 18:55:27 +01001292#if defined(CONFIG_DEBUG_FS)
Daniel Vetter277de952013-10-18 16:37:07 +02001293static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1294 uint32_t crc0, uint32_t crc1,
1295 uint32_t crc2, uint32_t crc3,
1296 uint32_t crc4)
Shuang He8bf1e9f2013-10-15 18:55:27 +01001297{
1298 struct drm_i915_private *dev_priv = dev->dev_private;
1299 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1300 struct intel_pipe_crc_entry *entry;
Damien Lespiauac2300d2013-10-15 18:55:30 +01001301 int head, tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001302
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001303 spin_lock(&pipe_crc->lock);
1304
Damien Lespiau0c912c72013-10-15 18:55:37 +01001305 if (!pipe_crc->entries) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001306 spin_unlock(&pipe_crc->lock);
Damien Lespiau0c912c72013-10-15 18:55:37 +01001307 DRM_ERROR("spurious interrupt\n");
1308 return;
1309 }
1310
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001311 head = pipe_crc->head;
1312 tail = pipe_crc->tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001313
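	/*
	 * Single-producer ring buffer: CIRC_SPACE() < 1 means the reader
	 * has fallen behind and the buffer is full.
	 */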
1314 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001315 spin_unlock(&pipe_crc->lock);
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001316 DRM_ERROR("CRC buffer overflowing\n");
1317 return;
1318 }
1319
1320 entry = &pipe_crc->entries[head];
Shuang He8bf1e9f2013-10-15 18:55:27 +01001321
Daniel Vetter8bc5e952013-10-16 22:55:49 +02001322 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
Daniel Vettereba94eb2013-10-16 22:55:46 +02001323 entry->crc[0] = crc0;
1324 entry->crc[1] = crc1;
1325 entry->crc[2] = crc2;
1326 entry->crc[3] = crc3;
1327 entry->crc[4] = crc4;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001328
1329 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001330 pipe_crc->head = head;
1331
1332 spin_unlock(&pipe_crc->lock);
Damien Lespiau07144422013-10-15 18:55:40 +01001333
1334 wake_up_interruptible(&pipe_crc->wq);
Shuang He8bf1e9f2013-10-15 18:55:27 +01001335}
1336#else
Daniel Vetter277de952013-10-18 16:37:07 +02001337static inline void
1338display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1339 uint32_t crc0, uint32_t crc1,
1340 uint32_t crc2, uint32_t crc3,
1341 uint32_t crc4) {}
Shuang He8bf1e9f2013-10-15 18:55:27 +01001342#endif
1343
Daniel Vetter277de952013-10-18 16:37:07 +02001344
1345static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Chris Wilsonfc6826d2012-04-15 11:56:03 +01001346{
1347 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001348
Daniel Vetter277de952013-10-18 16:37:07 +02001349 display_pipe_crc_irq_handler(dev, pipe,
1350 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1351 0, 0, 0, 0);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001352}
1353
Daniel Vetter277de952013-10-18 16:37:07 +02001354static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Chris Wilsonfc6826d2012-04-15 11:56:03 +01001355{
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001356 struct drm_i915_private *dev_priv = dev->dev_private;
1357
Daniel Vetter277de952013-10-18 16:37:07 +02001358 display_pipe_crc_irq_handler(dev, pipe,
1359 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1360 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1361 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1362 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1363 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001364}
1365
Daniel Vetter277de952013-10-18 16:37:07 +02001366static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
Keith Packard05eff842008-11-19 14:03:05 -08001367{
1368 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001369 uint32_t res1, res2;
1370
1371 if (INTEL_INFO(dev)->gen >= 3)
1372 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1373 else
1374 res1 = 0;
1375
1376 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1377 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1378 else
1379 res2 = 0;
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001380
Daniel Vetter277de952013-10-18 16:37:07 +02001381 display_pipe_crc_irq_handler(dev, pipe,
1382 I915_READ(PIPE_CRC_RES_RED(pipe)),
1383 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1384 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1385 res1, res2);
Daniel Vetter41a05a32013-07-04 23:35:26 +02001386}
Ben Widawskybaf02a12013-05-28 19:22:24 -07001387
1388/* The RPS events need forcewake, so we add them to a work queue and mask their
1389 * IMR bits until the work is done. Other interrupts can be processed without
1390 * the work queue. */
1391static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1392{
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001393 if (pm_iir & GEN6_PM_RPS_EVENTS) {
Ben Widawsky48484052013-05-28 19:22:27 -07001394 spin_lock(&dev_priv->irq_lock);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001395 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
Paulo Zanoni4d3b3d52013-08-09 17:04:36 -03001396 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
Daniel Vetter59cdb632013-07-04 23:35:28 +02001397 spin_unlock(&dev_priv->irq_lock);
Daniel Vetter2adbee62013-07-04 23:35:27 +02001398
1399 queue_work(dev_priv->wq, &dev_priv->rps.work);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001400 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001401
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001402 if (HAS_VEBOX(dev_priv->dev)) {
1403 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1404 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
Ben Widawsky12638c52013-05-28 19:22:31 -07001405
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001406 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1407 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
1408 i915_handle_error(dev_priv->dev, false);
1409 }
Ben Widawsky12638c52013-05-28 19:22:31 -07001410 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001411}
1412
Daniel Vetterff1f5252012-10-02 15:10:55 +02001413static irqreturn_t valleyview_irq_handler(int irq, void *arg)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001414{
1415 struct drm_device *dev = (struct drm_device *) arg;
1416 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1417 u32 iir, gt_iir, pm_iir;
1418 irqreturn_t ret = IRQ_NONE;
1419 unsigned long irqflags;
1420 int pipe;
1421 u32 pipe_stats[I915_MAX_PIPES];
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001422
1423 atomic_inc(&dev_priv->irq_received);
1424
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001425 while (true) {
1426 iir = I915_READ(VLV_IIR);
1427 gt_iir = I915_READ(GTIIR);
1428 pm_iir = I915_READ(GEN6_PMIIR);
1429
1430 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1431 goto out;
1432
1433 ret = IRQ_HANDLED;
1434
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001435 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001436
1437 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1438 for_each_pipe(pipe) {
1439 int reg = PIPESTAT(pipe);
1440 pipe_stats[pipe] = I915_READ(reg);
1441
1442 /*
1443 * Clear the PIPE*STAT regs before the IIR
1444 */
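			/* 0x8000ffff: underrun bit plus the sticky low status word. */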
1445 if (pipe_stats[pipe] & 0x8000ffff) {
1446 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1447 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1448 pipe_name(pipe));
1449 I915_WRITE(reg, pipe_stats[pipe]);
1450 }
1451 }
1452 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1453
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001454 for_each_pipe(pipe) {
Jesse Barnes7b5562d2013-11-05 15:48:01 -08001455 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001456 drm_handle_vblank(dev, pipe);
1457
1458 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1459 intel_prepare_page_flip(dev, pipe);
1460 intel_finish_page_flip(dev, pipe);
1461 }
Daniel Vetter4356d582013-10-16 22:55:55 +02001462
1463 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Daniel Vetter277de952013-10-18 16:37:07 +02001464 i9xx_pipe_crc_irq_handler(dev, pipe);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001465 }
1466
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001467 /* Consume port. Then clear IIR or we'll miss events */
1468 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1469 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02001470 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001471
1472 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1473 hotplug_status);
Daniel Vetter91d131d2013-06-27 17:52:14 +02001474
1475 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1476
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001477 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1478 I915_READ(PORT_HOTPLUG_STAT);
1479 }
1480
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001481 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1482 gmbus_irq_handler(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001483
Paulo Zanoni60611c12013-08-15 11:50:01 -03001484 if (pm_iir)
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001485 gen6_rps_irq_handler(dev_priv, pm_iir);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001486
1487 I915_WRITE(GTIIR, gt_iir);
1488 I915_WRITE(GEN6_PMIIR, pm_iir);
1489 I915_WRITE(VLV_IIR, iir);
1490 }
1491
1492out:
1493 return ret;
1494}
1495
Adam Jackson23e81d62012-06-06 15:45:44 -04001496static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08001497{
1498 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001499 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001500 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Jesse Barnes776ad802011-01-04 15:09:39 -08001501
Daniel Vetter91d131d2013-06-27 17:52:14 +02001502 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1503
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001504 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1505 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1506 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08001507 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001508 port_name(port));
1509 }
Jesse Barnes776ad802011-01-04 15:09:39 -08001510
Daniel Vetterce99c252012-12-01 13:53:47 +01001511 if (pch_iir & SDE_AUX_MASK)
1512 dp_aux_irq_handler(dev);
1513
Jesse Barnes776ad802011-01-04 15:09:39 -08001514 if (pch_iir & SDE_GMBUS)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001515 gmbus_irq_handler(dev);
Jesse Barnes776ad802011-01-04 15:09:39 -08001516
1517 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1518 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1519
1520 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1521 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1522
1523 if (pch_iir & SDE_POISON)
1524 DRM_ERROR("PCH poison interrupt\n");
1525
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001526 if (pch_iir & SDE_FDI_MASK)
1527 for_each_pipe(pipe)
1528 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1529 pipe_name(pipe),
1530 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08001531
1532 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1533 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1534
1535 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1536 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1537
Jesse Barnes776ad802011-01-04 15:09:39 -08001538 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Paulo Zanoni86642812013-04-12 17:57:57 -03001539 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1540 false))
1541 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1542
1543 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1544 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1545 false))
1546 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1547}
1548
1549static void ivb_err_int_handler(struct drm_device *dev)
1550{
1551 struct drm_i915_private *dev_priv = dev->dev_private;
1552 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001553 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03001554
Paulo Zanonide032bf2013-04-12 17:57:58 -03001555 if (err_int & ERR_INT_POISON)
1556 DRM_ERROR("Poison interrupt\n");
1557
Daniel Vetter5a69b892013-10-16 22:55:52 +02001558 for_each_pipe(pipe) {
1559 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1560 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1561 false))
1562 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1563 pipe_name(pipe));
1564 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001565
Daniel Vetter5a69b892013-10-16 22:55:52 +02001566 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1567 if (IS_IVYBRIDGE(dev))
Daniel Vetter277de952013-10-18 16:37:07 +02001568 ivb_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001569 else
Daniel Vetter277de952013-10-18 16:37:07 +02001570 hsw_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001571 }
1572 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001573
1574 I915_WRITE(GEN7_ERR_INT, err_int);
1575}
1576
1577static void cpt_serr_int_handler(struct drm_device *dev)
1578{
1579 struct drm_i915_private *dev_priv = dev->dev_private;
1580 u32 serr_int = I915_READ(SERR_INT);
1581
Paulo Zanonide032bf2013-04-12 17:57:58 -03001582 if (serr_int & SERR_INT_POISON)
1583 DRM_ERROR("PCH poison interrupt\n");
1584
Paulo Zanoni86642812013-04-12 17:57:57 -03001585 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1586 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1587 false))
1588 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1589
1590 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1591 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1592 false))
1593 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1594
1595 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1596 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1597 false))
1598 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1599
1600 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08001601}
1602
Adam Jackson23e81d62012-06-06 15:45:44 -04001603static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1604{
1605 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1606 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001607 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04001608
Daniel Vetter91d131d2013-06-27 17:52:14 +02001609 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1610
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001611 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1612 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1613 SDE_AUDIO_POWER_SHIFT_CPT);
1614 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1615 port_name(port));
1616 }
Adam Jackson23e81d62012-06-06 15:45:44 -04001617
1618 if (pch_iir & SDE_AUX_MASK_CPT)
Daniel Vetterce99c252012-12-01 13:53:47 +01001619 dp_aux_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001620
1621 if (pch_iir & SDE_GMBUS_CPT)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001622 gmbus_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001623
1624 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1625 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1626
1627 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1628 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1629
1630 if (pch_iir & SDE_FDI_MASK_CPT)
1631 for_each_pipe(pipe)
1632 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1633 pipe_name(pipe),
1634 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03001635
1636 if (pch_iir & SDE_ERROR_CPT)
1637 cpt_serr_int_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001638}
1639
Paulo Zanonic008bc62013-07-12 16:35:10 -03001640static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1641{
1642 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter40da17c2013-10-21 18:04:36 +02001643 enum pipe pipe;
Paulo Zanonic008bc62013-07-12 16:35:10 -03001644
1645 if (de_iir & DE_AUX_CHANNEL_A)
1646 dp_aux_irq_handler(dev);
1647
1648 if (de_iir & DE_GSE)
1649 intel_opregion_asle_intr(dev);
1650
Paulo Zanonic008bc62013-07-12 16:35:10 -03001651 if (de_iir & DE_POISON)
1652 DRM_ERROR("Poison interrupt\n");
1653
Daniel Vetter40da17c2013-10-21 18:04:36 +02001654 for_each_pipe(pipe) {
1655 if (de_iir & DE_PIPE_VBLANK(pipe))
1656 drm_handle_vblank(dev, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03001657
Daniel Vetter40da17c2013-10-21 18:04:36 +02001658 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1659 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1660 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1661 pipe_name(pipe));
Paulo Zanonic008bc62013-07-12 16:35:10 -03001662
Daniel Vetter40da17c2013-10-21 18:04:36 +02001663 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1664 i9xx_pipe_crc_irq_handler(dev, pipe);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001665
Daniel Vetter40da17c2013-10-21 18:04:36 +02001666 /* plane/pipes map 1:1 on ilk+ */
1667 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1668 intel_prepare_page_flip(dev, pipe);
1669 intel_finish_page_flip_plane(dev, pipe);
1670 }
Paulo Zanonic008bc62013-07-12 16:35:10 -03001671 }
1672
1673 /* check event from PCH */
1674 if (de_iir & DE_PCH_EVENT) {
1675 u32 pch_iir = I915_READ(SDEIIR);
1676
1677 if (HAS_PCH_CPT(dev))
1678 cpt_irq_handler(dev, pch_iir);
1679 else
1680 ibx_irq_handler(dev, pch_iir);
1681
1682 /* should clear PCH hotplug event before clear CPU irq */
1683 I915_WRITE(SDEIIR, pch_iir);
1684 }
1685
1686 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1687 ironlake_rps_change_irq_handler(dev);
1688}
1689
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001690static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1691{
1692 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02001693 enum pipe i;
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001694
1695 if (de_iir & DE_ERR_INT_IVB)
1696 ivb_err_int_handler(dev);
1697
1698 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1699 dp_aux_irq_handler(dev);
1700
1701 if (de_iir & DE_GSE_IVB)
1702 intel_opregion_asle_intr(dev);
1703
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02001704 for_each_pipe(i) {
Daniel Vetter40da17c2013-10-21 18:04:36 +02001705 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001706 drm_handle_vblank(dev, i);
Daniel Vetter40da17c2013-10-21 18:04:36 +02001707
1708 /* plane/pipes map 1:1 on ilk+ */
1709 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001710 intel_prepare_page_flip(dev, i);
1711 intel_finish_page_flip_plane(dev, i);
1712 }
1713 }
1714
1715 /* check event from PCH */
1716 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1717 u32 pch_iir = I915_READ(SDEIIR);
1718
1719 cpt_irq_handler(dev, pch_iir);
1720
1721 /* clear the PCH hotplug event before clearing the CPU irq */
1722 I915_WRITE(SDEIIR, pch_iir);
1723 }
1724}
1725
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001726static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001727{
1728 struct drm_device *dev = (struct drm_device *) arg;
1729 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001730 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01001731 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001732
1733 atomic_inc(&dev_priv->irq_received);
1734
Paulo Zanoni86642812013-04-12 17:57:57 -03001735 /* We get interrupts on unclaimed registers, so check for this before we
1736 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01001737 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03001738
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001739 /* disable master interrupt before clearing iir */
1740 de_ier = I915_READ(DEIER);
1741 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03001742 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01001743
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001744 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1745 * interrupts will be stored on its back queue, and then we'll be
1746 * able to process them after we restore SDEIER (as soon as we restore
1747 * it, we'll get an interrupt if SDEIIR still has something to process
1748 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07001749 if (!HAS_PCH_NOP(dev)) {
1750 sde_ier = I915_READ(SDEIER);
1751 I915_WRITE(SDEIER, 0);
1752 POSTING_READ(SDEIER);
1753 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001754
Chris Wilson0e434062012-05-09 21:45:44 +01001755 gt_iir = I915_READ(GTIIR);
1756 if (gt_iir) {
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001757 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001758 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001759 else
1760 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001761 I915_WRITE(GTIIR, gt_iir);
1762 ret = IRQ_HANDLED;
1763 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001764
1765 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01001766 if (de_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001767 if (INTEL_INFO(dev)->gen >= 7)
1768 ivb_display_irq_handler(dev, de_iir);
1769 else
1770 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001771 I915_WRITE(DEIIR, de_iir);
1772 ret = IRQ_HANDLED;
1773 }
1774
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001775 if (INTEL_INFO(dev)->gen >= 6) {
1776 u32 pm_iir = I915_READ(GEN6_PMIIR);
1777 if (pm_iir) {
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001778 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001779 I915_WRITE(GEN6_PMIIR, pm_iir);
1780 ret = IRQ_HANDLED;
1781 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001782 }
1783
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001784 I915_WRITE(DEIER, de_ier);
1785 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07001786 if (!HAS_PCH_NOP(dev)) {
1787 I915_WRITE(SDEIER, sde_ier);
1788 POSTING_READ(SDEIER);
1789 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001790
1791 return ret;
1792}
1793
Ben Widawskyabd58f02013-11-02 21:07:09 -07001794static irqreturn_t gen8_irq_handler(int irq, void *arg)
1795{
1796 struct drm_device *dev = arg;
1797 struct drm_i915_private *dev_priv = dev->dev_private;
1798 u32 master_ctl;
1799 irqreturn_t ret = IRQ_NONE;
1800 uint32_t tmp = 0;
Daniel Vetterc42664c2013-11-07 11:05:40 +01001801 enum pipe pipe;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001802
1803 atomic_inc(&dev_priv->irq_received);
1804
1805 master_ctl = I915_READ(GEN8_MASTER_IRQ);
1806 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1807 if (!master_ctl)
1808 return IRQ_NONE;
1809
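	/* Disable the master interrupt while servicing; re-enabled below. */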
1810 I915_WRITE(GEN8_MASTER_IRQ, 0);
1811 POSTING_READ(GEN8_MASTER_IRQ);
1812
1813 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1814
1815 if (master_ctl & GEN8_DE_MISC_IRQ) {
1816 tmp = I915_READ(GEN8_DE_MISC_IIR);
1817 if (tmp & GEN8_DE_MISC_GSE)
1818 intel_opregion_asle_intr(dev);
1819 else if (tmp)
1820 DRM_ERROR("Unexpected DE Misc interrupt\n");
1821 else
1822 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
1823
1824 if (tmp) {
1825 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
1826 ret = IRQ_HANDLED;
1827 }
1828 }
1829
Daniel Vetter6d766f02013-11-07 14:49:55 +01001830 if (master_ctl & GEN8_DE_PORT_IRQ) {
1831 tmp = I915_READ(GEN8_DE_PORT_IIR);
1832 if (tmp & GEN8_AUX_CHANNEL_A)
1833 dp_aux_irq_handler(dev);
1834 else if (tmp)
1835 DRM_ERROR("Unexpected DE Port interrupt\n");
1836 else
1837 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
1838
1839 if (tmp) {
1840 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
1841 ret = IRQ_HANDLED;
1842 }
1843 }
1844
Daniel Vetterc42664c2013-11-07 11:05:40 +01001845 for_each_pipe(pipe) {
1846 uint32_t pipe_iir;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001847
Daniel Vetterc42664c2013-11-07 11:05:40 +01001848 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
1849 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001850
Daniel Vetterc42664c2013-11-07 11:05:40 +01001851 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
1852 if (pipe_iir & GEN8_PIPE_VBLANK)
1853 drm_handle_vblank(dev, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001854
Daniel Vetterc42664c2013-11-07 11:05:40 +01001855 if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
1856 intel_prepare_page_flip(dev, pipe);
1857 intel_finish_page_flip_plane(dev, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001858 }
Daniel Vetterc42664c2013-11-07 11:05:40 +01001859
Daniel Vetter0fbe7872013-11-07 11:05:44 +01001860 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
1861 hsw_pipe_crc_irq_handler(dev, pipe);
1862
Daniel Vetter38d83c962013-11-07 11:05:46 +01001863 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
1864 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1865 false))
1866 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1867 pipe_name(pipe));
1868 }
1869
Daniel Vetter30100f22013-11-07 14:49:24 +01001870 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
1871 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
1872 pipe_name(pipe),
1873 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
1874 }
Daniel Vetterc42664c2013-11-07 11:05:40 +01001875
1876 if (pipe_iir) {
1877 ret = IRQ_HANDLED;
1878 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
1879 } else
Ben Widawskyabd58f02013-11-02 21:07:09 -07001880 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
1881 }
1882
Daniel Vetter92d03a82013-11-07 11:05:43 +01001883 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
1884 /*
1885 * FIXME(BDW): Assume for now that the new interrupt handling
1886 * scheme also closed the SDE interrupt handling race we've seen
1887 * on older pch-split platforms. But this needs testing.
1888 */
1889 u32 pch_iir = I915_READ(SDEIIR);
1890
1891 cpt_irq_handler(dev, pch_iir);
1892
1893 if (pch_iir) {
1894 I915_WRITE(SDEIIR, pch_iir);
1895 ret = IRQ_HANDLED;
1896 }
1897 }
1898
Ben Widawskyabd58f02013-11-02 21:07:09 -07001899 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1900 POSTING_READ(GEN8_MASTER_IRQ);
1901
1902 return ret;
1903}
1904
Daniel Vetter17e1df02013-09-08 21:57:13 +02001905static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1906 bool reset_completed)
1907{
1908 struct intel_ring_buffer *ring;
1909 int i;
1910
1911 /*
1912 * Notify all waiters for GPU completion events that reset state has
1913 * been changed, and that they need to restart their wait after
1914 * checking for potential errors (and bailing out to drop locks if there is
1915 * a gpu reset pending so that i915_error_work_func can acquire them).
1916 */
1917
1918 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1919 for_each_ring(ring, dev_priv, i)
1920 wake_up_all(&ring->irq_queue);
1921
1922 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1923 wake_up_all(&dev_priv->pending_flip_queue);
1924
1925 /*
1926 * Signal tasks blocked in i915_gem_wait_for_error that the pending
1927 * reset state is cleared.
1928 */
1929 if (reset_completed)
1930 wake_up_all(&dev_priv->gpu_error.reset_queue);
1931}
1932
Jesse Barnes8a905232009-07-11 16:48:03 -04001933/**
1934 * i915_error_work_func - do process context error handling work
1935 * @work: work struct
1936 *
1937 * Fire an error uevent so userspace can see that a hang or error
1938 * was detected.
1939 */
1940static void i915_error_work_func(struct work_struct *work)
1941{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001942 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1943 work);
1944 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1945 gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04001946 struct drm_device *dev = dev_priv->dev;
Ben Widawskycce723e2013-07-19 09:16:42 -07001947 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1948 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1949 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetter17e1df02013-09-08 21:57:13 +02001950 int ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04001951
Dave Airlie5bdebb12013-10-11 14:07:25 +10001952 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04001953
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001954 /*
1955 * Note that there's only one work item which does gpu resets, so we
1956 * need not worry about concurrent gpu resets potentially incrementing
1957 * error->reset_counter twice. We only need to take care of another
1958 * racing irq/hangcheck declaring the gpu dead for a second time. A
1959 * quick check for that is good enough: schedule_work ensures the
1960 * correct ordering between hang detection and this work item, and since
1961 * the reset in-progress bit is only ever set by code outside of this
1962 * work we don't need to worry about any other races.
1963 */
1964 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01001965 DRM_DEBUG_DRIVER("resetting chip\n");
Dave Airlie5bdebb12013-10-11 14:07:25 +10001966 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001967 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001968
Daniel Vetter17e1df02013-09-08 21:57:13 +02001969 /*
1970 * All state reset _must_ be completed before we update the
1971 * reset counter, for otherwise waiters might miss the reset
1972 * pending state and not properly drop locks, resulting in
1973 * deadlocks with the reset work.
1974 */
Daniel Vetterf69061b2012-12-06 09:01:42 +01001975 ret = i915_reset(dev);
1976
Daniel Vetter17e1df02013-09-08 21:57:13 +02001977 intel_display_handle_reset(dev);
1978
Daniel Vetterf69061b2012-12-06 09:01:42 +01001979 if (ret == 0) {
1980 /*
1981 * After all the gem state is reset, increment the reset
1982 * counter and wake up everyone waiting for the reset to
1983 * complete.
1984 *
1985 * Since unlock operations are a one-sided barrier only,
1986 * we need to insert a barrier here to order any seqno
1987 * updates before the counter increment.
1989 */
1990 smp_mb__before_atomic_inc();
1991 atomic_inc(&dev_priv->gpu_error.reset_counter);
1992
Dave Airlie5bdebb12013-10-11 14:07:25 +10001993 kobject_uevent_env(&dev->primary->kdev->kobj,
Daniel Vetterf69061b2012-12-06 09:01:42 +01001994 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001995 } else {
1996 atomic_set(&error->reset_counter, I915_WEDGED);
Ben Gamarif316a422009-09-14 17:48:46 -04001997 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001998
Daniel Vetter17e1df02013-09-08 21:57:13 +02001999 /*
2000 * Note: The wake_up also serves as a memory barrier so that
2001 * waiters see the updated value of the reset counter atomic_t.
2002 */
2003 i915_error_wake_up(dev_priv, true);
Ben Gamarif316a422009-09-14 17:48:46 -04002004 }
Jesse Barnes8a905232009-07-11 16:48:03 -04002005}
2006
Chris Wilson35aed2e2010-05-27 13:18:12 +01002007static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04002008{
2009 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07002010 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04002011 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07002012 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04002013
Chris Wilson35aed2e2010-05-27 13:18:12 +01002014 if (!eir)
2015 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04002016
Joe Perchesa70491c2012-03-18 13:00:11 -07002017 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002018
Ben Widawskybd9854f2012-08-23 15:18:09 -07002019 i915_get_extra_instdone(dev, instdone);
2020
Jesse Barnes8a905232009-07-11 16:48:03 -04002021 if (IS_G4X(dev)) {
2022 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2023 u32 ipeir = I915_READ(IPEIR_I965);
2024
Joe Perchesa70491c2012-03-18 13:00:11 -07002025 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2026 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002027 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2028 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002029 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002030 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002031 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002032 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002033 }
2034 if (eir & GM45_ERROR_PAGE_TABLE) {
2035 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002036 pr_err("page table error\n");
2037 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002038 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002039 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002040 }
2041 }
2042
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002043 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002044 if (eir & I915_ERROR_PAGE_TABLE) {
2045 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002046 pr_err("page table error\n");
2047 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002048 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002049 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002050 }
2051 }
2052
2053 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002054 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002055 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002056 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002057 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002058 /* pipestat has already been acked */
2059 }
2060 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002061 pr_err("instruction error\n");
2062 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002063 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2064 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002065 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002066 u32 ipeir = I915_READ(IPEIR);
2067
Joe Perchesa70491c2012-03-18 13:00:11 -07002068 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2069 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002070 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002071 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002072 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002073 } else {
2074 u32 ipeir = I915_READ(IPEIR_I965);
2075
Joe Perchesa70491c2012-03-18 13:00:11 -07002076 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2077 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002078 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002079 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002080 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002081 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002082 }
2083 }
2084
2085 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002086 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002087 eir = I915_READ(EIR);
2088 if (eir) {
2089 /*
2090 * some errors might have become stuck,
2091 * mask them.
2092 */
2093 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2094 I915_WRITE(EMR, I915_READ(EMR) | eir);
2095 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2096 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002097}
2098
2099/**
2100 * i915_handle_error - handle an error interrupt
2101 * @dev: drm device
2102 *
2103 * Do some basic checking of register state at error interrupt time and
2104 * dump it to the syslog. Also call i915_capture_error_state() to make
2105 * sure we get a record and make it available in debugfs. Fire a uevent
2106 * so userspace knows something bad happened (should trigger collection
2107 * of a ring dump etc.).
2108 */
Chris Wilson527f9e92010-11-11 01:16:58 +00002109void i915_handle_error(struct drm_device *dev, bool wedged)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002110{
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112
2113 i915_capture_error_state(dev);
2114 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002115
Ben Gamariba1234d2009-09-14 17:48:47 -04002116 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01002117 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2118 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002119
Ben Gamari11ed50e2009-09-14 17:48:45 -04002120 /*
Daniel Vetter17e1df02013-09-08 21:57:13 +02002121 * Wakeup waiting processes so that the reset work function
2122 * i915_error_work_func doesn't deadlock trying to grab various
2123 * locks. By bumping the reset counter first, the woken
2124 * processes will see a reset in progress and back off,
2125 * releasing their locks and then waiting for the reset completion.
2126 * We must do this for _all_ gpu waiters that might hold locks
2127 * that the reset work needs to acquire.
2128 *
2129 * Note: The wake_up serves as the required memory barrier to
2130 * ensure that the waiters see the updated value of the reset
2131 * counter atomic_t.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002132 */
Daniel Vetter17e1df02013-09-08 21:57:13 +02002133 i915_error_wake_up(dev_priv, false);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002134 }
2135
Daniel Vetter122f46b2013-09-04 17:36:14 +02002136 /*
2137 * Our reset work can grab modeset locks (since it needs to reset the
2138 * state of outstanding pageflips). Hence it must not be run on our own
2139 * dev_priv->wq work queue, because otherwise the flush_work in the pageflip
2140 * code will deadlock.
2141 */
2142 schedule_work(&dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04002143}
2144
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002145static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002146{
2147 drm_i915_private_t *dev_priv = dev->dev_private;
2148 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2149 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00002150 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002151 struct intel_unpin_work *work;
2152 unsigned long flags;
2153 bool stall_detected;
2154
2155 /* Ignore early vblank irqs */
2156 if (intel_crtc == NULL)
2157 return;
2158
2159 spin_lock_irqsave(&dev->event_lock, flags);
2160 work = intel_crtc->unpin_work;
2161
Chris Wilsone7d841c2012-12-03 11:36:30 +00002162 if (work == NULL ||
2163 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2164 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002165 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2166 spin_unlock_irqrestore(&dev->event_lock, flags);
2167 return;
2168 }
2169
2170 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00002171 obj = work->pending_flip_obj;
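	/*
	 * If the display base register already points at the new object,
	 * the flip happened but we missed the interrupt.
	 */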
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002172 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002173 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07002174 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002175 i915_gem_obj_ggtt_offset(obj);
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002176 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002177 int dspaddr = DSPADDR(intel_crtc->plane);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002178 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
Ville Syrjälä01f2c772011-12-20 00:06:49 +02002179 crtc->y * crtc->fb->pitches[0] +
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002180 crtc->x * crtc->fb->bits_per_pixel/8);
2181 }
2182
2183 spin_unlock_irqrestore(&dev->event_lock, flags);
2184
2185 if (stall_detected) {
2186 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2187 intel_prepare_page_flip(dev, intel_crtc->plane);
2188 }
2189}
2190
Keith Packard42f52ef2008-10-18 19:39:29 -07002191/* Called from drm generic code, passed 'crtc' which
2192 * we use as a pipe index
2193 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002194static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002195{
2196 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002197 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002198
Chris Wilson5eddb702010-09-11 13:48:45 +01002199 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002200 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002201
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002202 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002203 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002204 i915_enable_pipestat(dev_priv, pipe,
2205 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Keith Packarde9d21d72008-10-16 11:31:38 -07002206 else
Keith Packard7c463582008-11-04 02:03:27 -08002207 i915_enable_pipestat(dev_priv, pipe,
2208 PIPE_VBLANK_INTERRUPT_ENABLE);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002209
2210 /* maintain vblank delivery even in deep C-states */
2211 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002212 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002213 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002214
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002215 return 0;
2216}
2217
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002218static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002219{
2220 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2221 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002222 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002223 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002224
2225 if (!i915_pipe_enabled(dev, pipe))
2226 return -EINVAL;
2227
2228 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002229 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002230 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2231
2232 return 0;
2233}
2234
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002235static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2236{
2237 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2238 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002239 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002240
2241 if (!i915_pipe_enabled(dev, pipe))
2242 return -EINVAL;
2243
2244 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002245 imr = I915_READ(VLV_IMR);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002246 if (pipe == PIPE_A)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002247 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002248 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002249 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002250 I915_WRITE(VLV_IMR, imr);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002251 i915_enable_pipestat(dev_priv, pipe,
2252 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002253 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2254
2255 return 0;
2256}
2257
Ben Widawskyabd58f02013-11-02 21:07:09 -07002258static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2259{
2260 struct drm_i915_private *dev_priv = dev->dev_private;
2261 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002262
2263 if (!i915_pipe_enabled(dev, pipe))
2264 return -EINVAL;
2265
2266 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
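	/* Unmask vblank in the cached DE pipe IMR and flush the write. */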
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002267 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2268 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2269 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002270 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2271 return 0;
2272}
2273
Keith Packard42f52ef2008-10-18 19:39:29 -07002274/* Called from drm generic code, passed 'crtc' which
2275 * we use as a pipe index
2276 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002277static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002278{
2279 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002280 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002281
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002282 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002283 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002284 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00002285
Jesse Barnesf796cf82011-04-07 13:58:17 -07002286 i915_disable_pipestat(dev_priv, pipe,
2287 PIPE_VBLANK_INTERRUPT_ENABLE |
2288 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2289 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2290}
2291
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002292static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002293{
2294 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2295 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03002296 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Daniel Vetter40da17c2013-10-21 18:04:36 +02002297 DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002298
2299 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03002300 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002301 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2302}
2303
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002304static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2305{
2306 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2307 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002308 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002309
2310 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002311 i915_disable_pipestat(dev_priv, pipe,
2312 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002313 imr = I915_READ(VLV_IMR);
Daniel Vetter3b6c42e2013-10-21 18:04:35 +02002314 if (pipe == PIPE_A)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002315 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002316 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002317 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002318 I915_WRITE(VLV_IMR, imr);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002319 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2320}
2321
Ben Widawskyabd58f02013-11-02 21:07:09 -07002322static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2323{
2324 struct drm_i915_private *dev_priv = dev->dev_private;
2325 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002326
2327 if (!i915_pipe_enabled(dev, pipe))
2328 return;
2329
2330 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter7167d7c2013-11-07 11:05:45 +01002331 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2332 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2333 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
Ben Widawskyabd58f02013-11-02 21:07:09 -07002334 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2335}
2336
Chris Wilson893eead2010-10-27 14:44:35 +01002337static u32
2338ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08002339{
Chris Wilson893eead2010-10-27 14:44:35 +01002340 return list_entry(ring->request_list.prev,
2341 struct drm_i915_gem_request, list)->seqno;
2342}
2343
Chris Wilson9107e9d2013-06-10 11:20:20 +01002344static bool
2345ring_idle(struct intel_ring_buffer *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002346{
Chris Wilson9107e9d2013-06-10 11:20:20 +01002347 return (list_empty(&ring->request_list) ||
2348 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04002349}
2350
Chris Wilson6274f212013-06-10 11:20:21 +01002351static struct intel_ring_buffer *
2352semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002353{
2354 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilson6274f212013-06-10 11:20:21 +01002355 u32 cmd, ipehr, acthd, acthd_min;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002356
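	/*
	 * Bail unless the last executed command (IPEHR) is an
	 * MI_SEMAPHORE_MBOX wait; the two register-select bits (17:16)
	 * are masked off for the comparison.
	 */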
2357 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2358 if ((ipehr & ~(0x3 << 16)) !=
2359 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
Chris Wilson6274f212013-06-10 11:20:21 +01002360 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002361
2362 /* ACTHD is likely pointing to the dword after the actual command,
2363 * so scan backwards until we find the MBOX.
2364 */
Chris Wilson6274f212013-06-10 11:20:21 +01002365 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002366 acthd_min = max((int)acthd - 3 * 4, 0);
2367 do {
2368 cmd = ioread32(ring->virtual_start + acthd);
2369 if (cmd == ipehr)
2370 break;
2371
2372 acthd -= 4;
2373 if (acthd < acthd_min)
Chris Wilson6274f212013-06-10 11:20:21 +01002374 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002375 } while (1);
2376
Chris Wilson6274f212013-06-10 11:20:21 +01002377 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
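	/* Bit 17 of the command selects which of the other two rings signals us. */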
2378 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
Chris Wilsona24a11e2013-03-14 17:52:05 +02002379}
2380
Chris Wilson6274f212013-06-10 11:20:21 +01002381static int semaphore_passed(struct intel_ring_buffer *ring)
2382{
2383 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2384 struct intel_ring_buffer *signaller;
2385 u32 seqno, ctl;
2386
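	/* Mark this ring visited so a cycle of waiters reads as a deadlock. */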
2387 ring->hangcheck.deadlock = true;
2388
2389 signaller = semaphore_waits_for(ring, &seqno);
2390 if (signaller == NULL || signaller->hangcheck.deadlock)
2391 return -1;
2392
2393 /* cursory check for an unkickable deadlock */
2394 ctl = I915_READ_CTL(signaller);
2395 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2396 return -1;
2397
2398 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2399}
2400
2401static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2402{
2403 struct intel_ring_buffer *ring;
2404 int i;
2405
2406 for_each_ring(ring, dev_priv, i)
2407 ring->hangcheck.deadlock = false;
2408}
2409
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002410static enum intel_ring_hangcheck_action
2411ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002412{
2413 struct drm_device *dev = ring->dev;
2414 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002415 u32 tmp;
2416
Chris Wilson6274f212013-06-10 11:20:21 +01002417 if (ring->hangcheck.acthd != acthd)
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002418 return HANGCHECK_ACTIVE;
Chris Wilson6274f212013-06-10 11:20:21 +01002419
Chris Wilson9107e9d2013-06-10 11:20:20 +01002420 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002421 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002422
2423 /* Is the chip hanging on a WAIT_FOR_EVENT?
2424 * If so we can simply poke the RB_WAIT bit
2425 * and break the hang. This should work on
2426 * all but the second generation chipsets.
2427 */
2428 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002429 if (tmp & RING_WAIT) {
2430 DRM_ERROR("Kicking stuck wait on %s\n",
2431 ring->name);
Chris Wilson09e14bf2013-10-10 09:37:19 +01002432 i915_handle_error(dev, false);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002433 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002434 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002435 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002436
Chris Wilson6274f212013-06-10 11:20:21 +01002437 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2438 switch (semaphore_passed(ring)) {
2439 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002440 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002441 case 1:
2442 DRM_ERROR("Kicking stuck semaphore on %s\n",
2443 ring->name);
Chris Wilson09e14bf2013-10-10 09:37:19 +01002444 i915_handle_error(dev, false);
Chris Wilson6274f212013-06-10 11:20:21 +01002445 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002446 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01002447 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002448 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01002449 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002450 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002451
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002452 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002453}
2454
Ben Gamarif65d9422009-09-14 17:48:44 -04002455/**
2456 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002457 * batchbuffers in a long time. We keep track of seqno progress per ring, and
2458 * if there is no progress, the hangcheck score for that ring is increased.
2459 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2460 * we kick the ring. If we see no progress on three subsequent calls
2461 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04002462 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30
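
/*
 * Illustrative arithmetic (added commentary), assuming the default
 * hangcheck interval of DRM_I915_HANGCHECK_PERIOD (1500 ms at the time
 * of writing): a ring that stays HANGCHECK_HUNG gains HUNG (20) per
 * elapse and crosses FIRE (30) on its second consecutive check, i.e.
 * roughly three seconds after progress stopped.  A batch that merely
 * needs repeated kicking gains KICK (5) per elapse and is still caught
 * after about seven checks, while plain BUSY (1) increments decay once
 * the seqno advances again.
 */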

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * this ring is in a legitimate wait for
				 * another ring. In that case the waiting ring
				 * is a victim and we want to be sure we catch
				 * the right culprit. Then every time we do
				 * kick the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset timer in case the chip hangs without another
		 * request being added */
		i915_queue_hangcheck(dev);
}

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!i915_enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
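
/*
 * Added commentary: i915_queue_hangcheck() is re-armed from the request
 * submission path, so the timer only runs while work is outstanding;
 * round_jiffies_up() batches the expiry with other per-second timers so
 * an otherwise idle CPU is woken as rarely as possible.
 */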

static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
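
/*
 * Added note: the *_irq_preinstall hooks run before the interrupt
 * handler is registered, so they leave every source masked; only the
 * postinstall hooks unmask what is actually wanted.  The posting reads
 * flush the register writes to the hardware before the handler can
 * fire.
 */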

static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		/* and PM */
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, 0x0);
		POSTING_READ(GEN6_PMIER);
	}
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	gen5_gt_irq_preinstall(dev);

	ibx_irq_preinstall(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_preinstall(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void gen8_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* IIR can theoretically queue up two events. Be paranoid */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

#define GEN8_IRQ_INIT(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)
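
/*
 * Added note: IIR bits are write-one-to-clear, hence the 0xffffffff
 * writes above.  Because an IIR can latch a second event behind the one
 * being cleared, each helper clears it twice, with posting reads in
 * between so the writes reach the hardware in order.
 */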

	GEN8_IRQ_INIT_NDX(GT, 0);
	GEN8_IRQ_INIT_NDX(GT, 1);
	GEN8_IRQ_INIT_NDX(GT, 2);
	GEN8_IRQ_INIT_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_INIT(DE_PORT);
	GEN8_IRQ_INIT(DE_MISC);
	GEN8_IRQ_INIT(PCU);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX

	POSTING_READ(GEN8_PCU_IIR);

	ibx_irq_preinstall(dev);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= GEN6_PM_RPS_EVENTS;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
				DE_ERR_INT_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB);

		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
	}

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
		PIPE_CRC_DONE_ENABLE;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  Enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	int i;

	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
		u32 tmp = I915_READ(GEN8_GT_IIR(i));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  i, tmp);
		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
	}
	POSTING_READ(GEN8_GT_IER(0));
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
		GEN8_PIPE_CDCLK_CRC_DONE |
		GEN8_PIPE_FIFO_UNDERRUN |
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
	int pipe;
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(pipe) {
		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  pipe, tmp);
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
	}
	POSTING_READ(GEN8_DE_PIPE_ISR(0));

	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
	POSTING_READ(GEN8_DE_PORT_IER);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);

#define GEN8_IRQ_FINI_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

#define GEN8_IRQ_FINI(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_FINI(DE_PORT);
	GEN8_IRQ_FINI(DE_MISC);
	GEN8_IRQ_FINI(PCU);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	POSTING_READ(GEN8_PCU_IIR);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
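
/*
 * Added note: when an interrupt storm is detected on a pin elsewhere in
 * the driver, that pin is marked HPD_DISABLED and its connector falls
 * back to polling.  The timer function above fires once the storm has
 * had time to die down, flips the pin back to HPD_ENABLED, restores the
 * connector's preferred polling mode and reprograms the hotplug
 * registers via the platform's hpd_irq_setup hook.
 */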
3773
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003774void intel_irq_init(struct drm_device *dev)
3775{
Chris Wilson8b2e3262012-04-24 22:59:41 +01003776 struct drm_i915_private *dev_priv = dev->dev_private;
3777
3778 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01003779 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003780 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01003781 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01003782
Daniel Vetter99584db2012-11-14 17:14:04 +01003783 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3784 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01003785 (unsigned long) dev);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003786 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3787 (unsigned long) dev_priv);
Daniel Vetter61bac782012-12-01 21:03:21 +01003788
Tomas Janousek97a19a22012-12-08 13:48:13 +01003789 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01003790
Ville Syrjälä4cdb83e2013-10-11 21:52:44 +03003791 if (IS_GEN2(dev)) {
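		/* gen2 has no usable hardware frame counter */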
3792 dev->max_vblank_count = 0;
3793 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3794 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003795 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3796 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
Ville Syrjälä391f75e2013-09-25 19:55:26 +03003797 } else {
3798 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3799 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003800 }
3801
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03003802 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
Keith Packardc3613de2011-08-12 17:05:54 -07003803 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
Ville Syrjäläc2baf4b2013-09-23 14:48:50 +03003804 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3805 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003806
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003807 if (IS_VALLEYVIEW(dev)) {
3808 dev->driver->irq_handler = valleyview_irq_handler;
3809 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3810 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3811 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3812 dev->driver->enable_vblank = valleyview_enable_vblank;
3813 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05003814 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003815 } else if (IS_GEN8(dev)) {
3816 dev->driver->irq_handler = gen8_irq_handler;
3817 dev->driver->irq_preinstall = gen8_irq_preinstall;
3818 dev->driver->irq_postinstall = gen8_irq_postinstall;
3819 dev->driver->irq_uninstall = gen8_irq_uninstall;
3820 dev->driver->enable_vblank = gen8_enable_vblank;
3821 dev->driver->disable_vblank = gen8_disable_vblank;
3822 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003823 } else if (HAS_PCH_SPLIT(dev)) {
3824 dev->driver->irq_handler = ironlake_irq_handler;
3825 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3826 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3827 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3828 dev->driver->enable_vblank = ironlake_enable_vblank;
3829 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003830 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003831 } else {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003832 if (INTEL_INFO(dev)->gen == 2) {
3833 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3834 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3835 dev->driver->irq_handler = i8xx_irq_handler;
3836 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003837 } else if (INTEL_INFO(dev)->gen == 3) {
3838 dev->driver->irq_preinstall = i915_irq_preinstall;
3839 dev->driver->irq_postinstall = i915_irq_postinstall;
3840 dev->driver->irq_uninstall = i915_irq_uninstall;
3841 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003842 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003843 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003844 dev->driver->irq_preinstall = i965_irq_preinstall;
3845 dev->driver->irq_postinstall = i965_irq_postinstall;
3846 dev->driver->irq_uninstall = i965_irq_uninstall;
3847 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05003848 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003849 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003850 dev->driver->enable_vblank = i915_enable_vblank;
3851 dev->driver->disable_vblank = i915_disable_vblank;
3852 }
3853}
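
/*
 * A rough sketch of the expected call order during driver load (an
 * assumption based on what the hooks installed above require; the DRM
 * core runs the pre/postinstall hooks from drm_irq_install()):
 *
 *	intel_irq_init(dev);	pick the per-platform vtable entries
 *	drm_irq_install(dev);	request the IRQ, run pre/postinstall
 *	intel_hpd_init(dev);	enable hotplug once modeset is ready
 */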
Daniel Vetter20afbda2012-12-11 14:05:07 +01003854
3855void intel_hpd_init(struct drm_device *dev)
3856{
3857 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eich821450c2013-04-16 13:36:55 +02003858 struct drm_mode_config *mode_config = &dev->mode_config;
3859 struct drm_connector *connector;
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003860 unsigned long irqflags;
Egbert Eich821450c2013-04-16 13:36:55 +02003861 int i;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003862
Egbert Eich821450c2013-04-16 13:36:55 +02003863 for (i = 1; i < HPD_NUM_PINS; i++) {
3864 dev_priv->hpd_stats[i].hpd_cnt = 0;
3865 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3866 }
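	/*
	 * Connectors keep whatever polling mode they asked for, except
	 * that a connector with no polling preference whose encoder has
	 * an HPD pin is switched to hotplug detection when the hardware
	 * supports it.
	 */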
3867 list_for_each_entry(connector, &mode_config->connector_list, head) {
3868 struct intel_connector *intel_connector = to_intel_connector(connector);
3869 connector->polled = intel_connector->polled;
3870 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3871 connector->polled = DRM_CONNECTOR_POLL_HPD;
3872 }
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003873
3874 /* Interrupt setup is already guaranteed to be single-threaded, this is
3875 * just to make the assert_spin_locked checks happy. */
3876 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003877 if (dev_priv->display.hpd_irq_setup)
3878 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003879 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003880}
Paulo Zanonic67a4702013-08-19 13:18:09 -03003881
3882/* Disable interrupts so we can allow Package C8+. */
3883void hsw_pc8_disable_interrupts(struct drm_device *dev)
3884{
3885 struct drm_i915_private *dev_priv = dev->dev_private;
3886 unsigned long irqflags;
3887
3888 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3889
3890 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3891 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3892 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3893 dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3894 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3895
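	/*
	 * Mask everything except the PCH event and hotplug bits; those
	 * stay live so a hotplug can still bring us back out of PC8 (a
	 * rationale inferred from the masks below).
	 */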
3896 ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3897 ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3898 ilk_disable_gt_irq(dev_priv, 0xffffffff);
3899 snb_disable_pm_irq(dev_priv, 0xffffffff);
3900
3901 dev_priv->pc8.irqs_disabled = true;
3902
3903 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3904}
3905
3906/* Restore interrupts so we can recover from Package C8+. */
3907void hsw_pc8_restore_interrupts(struct drm_device *dev)
3908{
3909 struct drm_i915_private *dev_priv = dev->dev_private;
3910 unsigned long irqflags;
3911 uint32_t val, expected;
3912
3913 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3914
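	/*
	 * Sanity check: nothing may touch these registers while PC8
	 * interrupts are disabled, so they must still hold exactly what
	 * hsw_pc8_disable_interrupts() programmed.
	 */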
3915 val = I915_READ(DEIMR);
3916 expected = ~DE_PCH_EVENT_IVB;
3917 WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3918
3919 val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3920 expected = ~SDE_HOTPLUG_MASK_CPT;
3921 WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3922 val, expected);
3923
3924 val = I915_READ(GTIMR);
3925 expected = 0xffffffff;
3926 WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3927
3928 val = I915_READ(GEN6_PMIMR);
3929 expected = 0xffffffff;
3930 WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3931 expected);
3932
3933 dev_priv->pc8.irqs_disabled = false;
3934
3935 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3936 ibx_enable_display_interrupt(dev_priv,
3937 ~dev_priv->pc8.regsave.sdeimr &
3938 ~SDE_HOTPLUG_MASK_CPT);
3939 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3940 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3941 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3942
3943 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3944}