/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
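/*
 * Note: interrupt_mask selects which GTIMR bits are touched, and within that
 * set only the bits also present in enabled_irq_mask end up unmasked.  For
 * example, ilk_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT) passes the
 * same bit twice and clears it in GTIMR, while ilk_disable_gt_irq() passes 0
 * as enabled_irq_mask and sets it again.
 */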

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static void snb_set_pm_irq(struct drm_i915_private *dev_priv, uint32_t val)
{
	snb_update_pm_irq(dev_priv, 0xffffffff, ~val);
}
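/*
 * snb_set_pm_irq() updates every bit, so GEN6_PMIMR ends up equal to val:
 * bits set in val are masked, bits cleared in val are unmasked.
 */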

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}
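/*
 * On IVB/HSW there is a single DE_ERR_INT_IVB mask bit shared by all pipes,
 * which is why the error interrupt is only unmasked above when
 * ivb_can_enable_err_int() reports that no pipe still has underrun reporting
 * disabled, and why a stale GEN7_ERR_INT underrun bit is flagged when masking.
 */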

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
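/*
 * For example, the error interrupt handlers below call
 * intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false) when an underrun
 * fires and only print the debug message if the returned previous state was
 * "enabled", so each underrun is reported once instead of flooding the log.
 */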

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
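/*
 * PIPESTAT packs enable bits in the high 16 bits and the corresponding status
 * bits in the low 16 bits, which is why the helpers above read the register
 * with "& 0x7fff0000" and enable with "mask | (mask >> 16)" (set the enable
 * bit and write 1 to the matching status bit to clear any stale event).
 */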

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
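/*
 * Note the two passes above: the first walks the connector list under
 * irq_lock and demotes storming connectors from HPD to polling, the second
 * runs without irq_lock (but under mode_config.mutex) and calls the encoder
 * hot_plug hooks plus detect() for the connectors that actually got an event,
 * sending a single hotplug uevent if any status changed.
 */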

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
				 u32 pm_iir)
{
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir;
	snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
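/*
 * Deferral scheme: the interrupt handler above only accumulates the RPS bits
 * in dev_priv->rps.pm_iir and masks them via snb_set_pm_irq(), then kicks
 * dev_priv->rps.work; gen6_pm_rps_work() later latches and clears pm_iir and
 * re-enables the masked bits with snb_enable_pm_irq().
 */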

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}
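/*
 * With the defaults above a pin is treated as storming when more than
 * HPD_STORM_THRESHOLD (5) interrupts arrive within HPD_STORM_DETECT_PERIOD
 * (1000 ms): the pin is marked HPD_MARK_DISABLED, its event bit is dropped
 * and dev_priv->display.hpd_irq_setup() is rerun to reprogram the hotplug
 * enables; the hotplug work then switches the connector over to polling.
 */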

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
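/*
 * Note that dp_aux_irq_handler() wakes the same gmbus_wait_queue as
 * gmbus_irq_handler(): in this code GMBUS and DP AUX completions share one
 * wait queue.
 */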

/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
		i915_handle_error(dev_priv->dev, false);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

Adam Jackson23e81d62012-06-06 15:45:44 -04001245static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1246{
1247 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1248 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02001249 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04001250
Daniel Vetter91d131d2013-06-27 17:52:14 +02001251 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1252
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03001253 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1254 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1255 SDE_AUDIO_POWER_SHIFT_CPT);
1256 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1257 port_name(port));
1258 }
Adam Jackson23e81d62012-06-06 15:45:44 -04001259
1260 if (pch_iir & SDE_AUX_MASK_CPT)
Daniel Vetterce99c252012-12-01 13:53:47 +01001261 dp_aux_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001262
1263 if (pch_iir & SDE_GMBUS_CPT)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001264 gmbus_irq_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001265
1266 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1267 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1268
1269 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1270 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1271
1272 if (pch_iir & SDE_FDI_MASK_CPT)
1273 for_each_pipe(pipe)
1274 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1275 pipe_name(pipe),
1276 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03001277
1278 if (pch_iir & SDE_ERROR_CPT)
1279 cpt_serr_int_handler(dev);
Adam Jackson23e81d62012-06-06 15:45:44 -04001280}
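/*
 * Editorial sketch, not part of the driver: the audio-power decode in
 * cpt_irq_handler() above relies on ffs() returning the 1-based index of the
 * lowest set bit, so a single set bit in the shifted SDE_AUDIO_POWER field
 * maps directly onto a port number.  The mask and shift below are made-up
 * placeholders used only to demonstrate the idiom on its own; the kernel's
 * ffs() behaves like the userspace one from <strings.h> for this purpose.
 */
#if 0	/* standalone example, never compiled into the driver */
#include <strings.h>

static int example_decode_port(unsigned int iir)
{
	const unsigned int power_mask  = 0x00f00000;	/* hypothetical field */
	const unsigned int power_shift = 20;		/* hypothetical shift */

	/* One bit per port; ffs() turns "bit n set" into "port n + 1". */
	return ffs((int)((iir & power_mask) >> power_shift));
}
#endif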
1281
Paulo Zanonic008bc62013-07-12 16:35:10 -03001282static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1283{
1284 struct drm_i915_private *dev_priv = dev->dev_private;
1285
1286 if (de_iir & DE_AUX_CHANNEL_A)
1287 dp_aux_irq_handler(dev);
1288
1289 if (de_iir & DE_GSE)
1290 intel_opregion_asle_intr(dev);
1291
1292 if (de_iir & DE_PIPEA_VBLANK)
1293 drm_handle_vblank(dev, 0);
1294
1295 if (de_iir & DE_PIPEB_VBLANK)
1296 drm_handle_vblank(dev, 1);
1297
1298 if (de_iir & DE_POISON)
1299 DRM_ERROR("Poison interrupt\n");
1300
1301 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1302 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1303 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1304
1305 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1306 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1307 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1308
1309 if (de_iir & DE_PLANEA_FLIP_DONE) {
1310 intel_prepare_page_flip(dev, 0);
1311 intel_finish_page_flip_plane(dev, 0);
1312 }
1313
1314 if (de_iir & DE_PLANEB_FLIP_DONE) {
1315 intel_prepare_page_flip(dev, 1);
1316 intel_finish_page_flip_plane(dev, 1);
1317 }
1318
1319 /* check event from PCH */
1320 if (de_iir & DE_PCH_EVENT) {
1321 u32 pch_iir = I915_READ(SDEIIR);
1322
1323 if (HAS_PCH_CPT(dev))
1324 cpt_irq_handler(dev, pch_iir);
1325 else
1326 ibx_irq_handler(dev, pch_iir);
1327
1328 /* should clear PCH hotplug event before clearing CPU irq */
1329 I915_WRITE(SDEIIR, pch_iir);
1330 }
1331
1332 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1333 ironlake_rps_change_irq_handler(dev);
1334}
1335
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001336static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1337{
1338 struct drm_i915_private *dev_priv = dev->dev_private;
1339 int i;
1340
1341 if (de_iir & DE_ERR_INT_IVB)
1342 ivb_err_int_handler(dev);
1343
1344 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1345 dp_aux_irq_handler(dev);
1346
1347 if (de_iir & DE_GSE_IVB)
1348 intel_opregion_asle_intr(dev);
1349
1350 for (i = 0; i < 3; i++) {
1351 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1352 drm_handle_vblank(dev, i);
1353 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1354 intel_prepare_page_flip(dev, i);
1355 intel_finish_page_flip_plane(dev, i);
1356 }
1357 }
1358
1359 /* check event from PCH */
1360 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1361 u32 pch_iir = I915_READ(SDEIIR);
1362
1363 cpt_irq_handler(dev, pch_iir);
1364
1365 /* clear PCH hotplug event before clearing CPU irq */
1366 I915_WRITE(SDEIIR, pch_iir);
1367 }
1368}
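/*
 * Editorial sketch, not part of the driver: the loop in
 * ivb_display_irq_handler() above exploits the fact that the per-pipe
 * display-engine bits repeat at a fixed stride, so the pipe A bit shifted by
 * (5 * pipe) selects the same event for pipes B and C.  The helper below just
 * restates that indexing trick with plain arguments.
 */
#if 0	/* standalone example, never compiled into the driver */
static int example_pipe_event_pending(unsigned int de_iir,
				      unsigned int pipe_a_bit, int pipe)
{
	/* Stride of 5 bits per pipe, matching the "<< (5 * i)" above. */
	return (de_iir & (pipe_a_bit << (5 * pipe))) != 0;
}
#endif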
1369
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001370static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001371{
1372 struct drm_device *dev = (struct drm_device *) arg;
1373 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001374 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01001375 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001376
1377 atomic_inc(&dev_priv->irq_received);
1378
Paulo Zanoni86642812013-04-12 17:57:57 -03001379 /* We get interrupts on unclaimed registers, so check for this before we
1380 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01001381 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03001382
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001383 /* disable master interrupt before clearing iir */
1384 de_ier = I915_READ(DEIER);
1385 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03001386 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01001387
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001388 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1389 * interrupts will be stored on its back queue, and then we'll be
1390 * able to process them after we restore SDEIER (as soon as we restore
1391 * it, we'll get an interrupt if SDEIIR still has something to process
1392 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07001393 if (!HAS_PCH_NOP(dev)) {
1394 sde_ier = I915_READ(SDEIER);
1395 I915_WRITE(SDEIER, 0);
1396 POSTING_READ(SDEIER);
1397 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001398
Paulo Zanoni86642812013-04-12 17:57:57 -03001399 /* On Haswell, also mask ERR_INT because we don't want to risk
1400 * generating "unclaimed register" interrupts from inside the interrupt
1401 * handler. */
Daniel Vetter4bc9d432013-06-27 13:44:58 +02001402 if (IS_HASWELL(dev)) {
1403 spin_lock(&dev_priv->irq_lock);
Paulo Zanoni86642812013-04-12 17:57:57 -03001404 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02001405 spin_unlock(&dev_priv->irq_lock);
1406 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001407
Chris Wilson0e434062012-05-09 21:45:44 +01001408 gt_iir = I915_READ(GTIIR);
1409 if (gt_iir) {
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001410 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001411 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001412 else
1413 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001414 I915_WRITE(GTIIR, gt_iir);
1415 ret = IRQ_HANDLED;
1416 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001417
1418 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01001419 if (de_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001420 if (INTEL_INFO(dev)->gen >= 7)
1421 ivb_display_irq_handler(dev, de_iir);
1422 else
1423 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001424 I915_WRITE(DEIIR, de_iir);
1425 ret = IRQ_HANDLED;
1426 }
1427
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001428 if (INTEL_INFO(dev)->gen >= 6) {
1429 u32 pm_iir = I915_READ(GEN6_PMIIR);
1430 if (pm_iir) {
1431 if (IS_HASWELL(dev))
1432 hsw_pm_irq_handler(dev_priv, pm_iir);
1433 else if (pm_iir & GEN6_PM_RPS_EVENTS)
1434 gen6_rps_irq_handler(dev_priv, pm_iir);
1435 I915_WRITE(GEN6_PMIIR, pm_iir);
1436 ret = IRQ_HANDLED;
1437 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001438 }
1439
Daniel Vetter4bc9d432013-06-27 13:44:58 +02001440 if (IS_HASWELL(dev)) {
1441 spin_lock(&dev_priv->irq_lock);
1442 if (ivb_can_enable_err_int(dev))
1443 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1444 spin_unlock(&dev_priv->irq_lock);
1445 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001446
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001447 I915_WRITE(DEIER, de_ier);
1448 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07001449 if (!HAS_PCH_NOP(dev)) {
1450 I915_WRITE(SDEIER, sde_ier);
1451 POSTING_READ(SDEIER);
1452 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001453
1454 return ret;
1455}
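/*
 * Editorial sketch, not part of the driver: ironlake_irq_handler() above
 * follows a "mask the master enable, drain the IIR registers, restore the
 * enables" pattern so that no new top-level interrupt fires while the
 * individual sources are dispatched (the same idea motivates the SDEIER
 * save/clear/restore for the south display engine).  reg_read()/reg_write()
 * are hypothetical stand-ins for I915_READ()/I915_WRITE(); only the ordering
 * is meant to mirror the handler above.
 */
#if 0	/* standalone example, never compiled into the driver */
static unsigned int example_drain_iir(unsigned int (*reg_read)(int reg),
				      void (*reg_write)(int reg, unsigned int val),
				      int ier, int iir, unsigned int master_bit)
{
	unsigned int saved_ier = reg_read(ier);
	unsigned int pending, handled = 0;

	/* 1. Mask the master bit so nothing new is raised while we work. */
	reg_write(ier, saved_ier & ~master_bit);

	/* 2. Snapshot what is pending, dispatch it, then ack by write-back. */
	pending = reg_read(iir);
	if (pending) {
		handled = pending;		/* per-bit handlers would run here */
		reg_write(iir, pending);
	}

	/* 3. Restore the saved enable mask (a posting read would follow). */
	reg_write(ier, saved_ier);

	return handled;
}
#endif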
1456
Jesse Barnes8a905232009-07-11 16:48:03 -04001457/**
1458 * i915_error_work_func - do process context error handling work
1459 * @work: work struct
1460 *
1461 * Fire an error uevent so userspace can see that a hang or error
1462 * was detected.
1463 */
1464static void i915_error_work_func(struct work_struct *work)
1465{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001466 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1467 work);
1468 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1469 gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04001470 struct drm_device *dev = dev_priv->dev;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001471 struct intel_ring_buffer *ring;
Ben Widawskycce723e2013-07-19 09:16:42 -07001472 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1473 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1474 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetterf69061b2012-12-06 09:01:42 +01001475 int i, ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04001476
Ben Gamarif316a422009-09-14 17:48:46 -04001477 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04001478
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001479 /*
1480 * Note that there's only one work item which does gpu resets, so we
1481 * need not worry about concurrent gpu resets potentially incrementing
1482 * error->reset_counter twice. We only need to take care of another
1483 * racing irq/hangcheck declaring the gpu dead for a second time. A
1484 * quick check for that is good enough: schedule_work ensures the
1485 * correct ordering between hang detection and this work item, and since
1486 * the reset in-progress bit is only ever set by code outside of this
1487 * work we don't need to worry about any other races.
1488 */
1489 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01001490 DRM_DEBUG_DRIVER("resetting chip\n");
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001491 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1492 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001493
Daniel Vetterf69061b2012-12-06 09:01:42 +01001494 ret = i915_reset(dev);
1495
1496 if (ret == 0) {
1497 /*
1498 * After all the gem state is reset, increment the reset
1499 * counter and wake up everyone waiting for the reset to
1500 * complete.
1501 *
1502 * Since unlock operations are a one-sided barrier only,
1503 * we need to insert a barrier here to order any seqno
1504 * updates before
1505 * the counter increment.
1506 */
1507 smp_mb__before_atomic_inc();
1508 atomic_inc(&dev_priv->gpu_error.reset_counter);
1509
1510 kobject_uevent_env(&dev->primary->kdev.kobj,
1511 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001512 } else {
1513 atomic_set(&error->reset_counter, I915_WEDGED);
Ben Gamarif316a422009-09-14 17:48:46 -04001514 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001515
Daniel Vetterf69061b2012-12-06 09:01:42 +01001516 for_each_ring(ring, dev_priv, i)
1517 wake_up_all(&ring->irq_queue);
1518
Ville Syrjälä96a02912013-02-18 19:08:49 +02001519 intel_display_handle_reset(dev);
1520
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001521 wake_up_all(&dev_priv->gpu_error.reset_queue);
Ben Gamarif316a422009-09-14 17:48:46 -04001522 }
Jesse Barnes8a905232009-07-11 16:48:03 -04001523}
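/*
 * Editorial sketch, not part of the driver (and simplified relative to the
 * real reset_counter encoding): the error work above treats
 * gpu_error.reset_counter as a generation counter -- a flag is set while a
 * reset is pending and the counter is bumped once the reset completes -- so
 * a sleeper that snapshotted the counter before waiting can later tell that
 * a reset was declared or finished in the meantime without taking any lock.
 */
#if 0	/* standalone example, never compiled into the driver */
static int example_reset_since(unsigned int snapshot, unsigned int now)
{
	/* Any change (flag set, generation bumped, or wedged marker written)
	 * means the GPU was declared hung after the snapshot was taken. */
	return now != snapshot;
}
#endif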
1524
Chris Wilson35aed2e2010-05-27 13:18:12 +01001525static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04001526{
1527 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07001528 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04001529 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07001530 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04001531
Chris Wilson35aed2e2010-05-27 13:18:12 +01001532 if (!eir)
1533 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04001534
Joe Perchesa70491c2012-03-18 13:00:11 -07001535 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04001536
Ben Widawskybd9854f2012-08-23 15:18:09 -07001537 i915_get_extra_instdone(dev, instdone);
1538
Jesse Barnes8a905232009-07-11 16:48:03 -04001539 if (IS_G4X(dev)) {
1540 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1541 u32 ipeir = I915_READ(IPEIR_I965);
1542
Joe Perchesa70491c2012-03-18 13:00:11 -07001543 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1544 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07001545 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1546 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07001547 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07001548 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04001549 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001550 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04001551 }
1552 if (eir & GM45_ERROR_PAGE_TABLE) {
1553 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07001554 pr_err("page table error\n");
1555 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04001556 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001557 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04001558 }
1559 }
1560
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001561 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04001562 if (eir & I915_ERROR_PAGE_TABLE) {
1563 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07001564 pr_err("page table error\n");
1565 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04001566 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001567 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04001568 }
1569 }
1570
1571 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07001572 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001573 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07001574 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001575 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04001576 /* pipestat has already been acked */
1577 }
1578 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07001579 pr_err("instruction error\n");
1580 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07001581 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1582 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001583 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04001584 u32 ipeir = I915_READ(IPEIR);
1585
Joe Perchesa70491c2012-03-18 13:00:11 -07001586 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1587 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07001588 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04001589 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001590 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04001591 } else {
1592 u32 ipeir = I915_READ(IPEIR_I965);
1593
Joe Perchesa70491c2012-03-18 13:00:11 -07001594 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1595 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07001596 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07001597 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04001598 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001599 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04001600 }
1601 }
1602
1603 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001604 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04001605 eir = I915_READ(EIR);
1606 if (eir) {
1607 /*
1608 * some errors might have become stuck,
1609 * mask them.
1610 */
1611 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1612 I915_WRITE(EMR, I915_READ(EMR) | eir);
1613 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1614 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01001615}
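/*
 * Editorial sketch, not part of the driver: the tail of
 * i915_report_and_clear_eir() shows a common pattern for write-to-clear error
 * registers -- write the observed bits back, re-read, and fold any bits that
 * refuse to clear into the mask register (EMR) so they stop raising
 * interrupts.  The helper below models that flow with plain values; the
 * "stuck_bits" argument stands in for whatever the hardware leaves asserted.
 */
#if 0	/* standalone example, never compiled into the driver */
static unsigned int example_ack_or_mask(unsigned int status,
					unsigned int stuck_bits,
					unsigned int *mask)
{
	/* Writing the seen bits back clears everything except the stuck ones. */
	unsigned int remaining = status & stuck_bits;

	if (remaining)
		*mask |= remaining;	/* stop listening to what will not clear */

	return remaining;		/* caller can log the masked bits */
}
#endif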
1616
1617/**
1618 * i915_handle_error - handle an error interrupt
1619 * @dev: drm device
1620 *
1621 * Do some basic checking of register state at error interrupt time and
1622 * dump it to the syslog. Also call i915_capture_error_state() to make
1623 * sure we get a record and make it available in debugfs. Fire a uevent
1624 * so userspace knows something bad happened (should trigger collection
1625 * of a ring dump etc.).
1626 */
Chris Wilson527f9e92010-11-11 01:16:58 +00001627void i915_handle_error(struct drm_device *dev, bool wedged)
Chris Wilson35aed2e2010-05-27 13:18:12 +01001628{
1629 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01001630 struct intel_ring_buffer *ring;
1631 int i;
Chris Wilson35aed2e2010-05-27 13:18:12 +01001632
1633 i915_capture_error_state(dev);
1634 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04001635
Ben Gamariba1234d2009-09-14 17:48:47 -04001636 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01001637 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1638 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04001639
Ben Gamari11ed50e2009-09-14 17:48:45 -04001640 /*
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001641 * Wake up waiting processes so that the reset work item
1642 * doesn't deadlock trying to grab various locks.
Ben Gamari11ed50e2009-09-14 17:48:45 -04001643 */
Chris Wilsonb4519512012-05-11 14:29:30 +01001644 for_each_ring(ring, dev_priv, i)
1645 wake_up_all(&ring->irq_queue);
Ben Gamari11ed50e2009-09-14 17:48:45 -04001646 }
1647
Daniel Vetter99584db2012-11-14 17:14:04 +01001648 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04001649}
1650
Ville Syrjälä21ad8332013-02-19 15:16:39 +02001651static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001652{
1653 drm_i915_private_t *dev_priv = dev->dev_private;
1654 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1655 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00001656 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001657 struct intel_unpin_work *work;
1658 unsigned long flags;
1659 bool stall_detected;
1660
1661 /* Ignore early vblank irqs */
1662 if (intel_crtc == NULL)
1663 return;
1664
1665 spin_lock_irqsave(&dev->event_lock, flags);
1666 work = intel_crtc->unpin_work;
1667
Chris Wilsone7d841c2012-12-03 11:36:30 +00001668 if (work == NULL ||
1669 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1670 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001671 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1672 spin_unlock_irqrestore(&dev->event_lock, flags);
1673 return;
1674 }
1675
1676 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00001677 obj = work->pending_flip_obj;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001678 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001679 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07001680 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001681 i915_gem_obj_ggtt_offset(obj);
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001682 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001683 int dspaddr = DSPADDR(intel_crtc->plane);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001684 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
Ville Syrjälä01f2c772011-12-20 00:06:49 +02001685 crtc->y * crtc->fb->pitches[0] +
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001686 crtc->x * crtc->fb->bits_per_pixel/8);
1687 }
1688
1689 spin_unlock_irqrestore(&dev->event_lock, flags);
1690
1691 if (stall_detected) {
1692 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1693 intel_prepare_page_flip(dev, intel_crtc->plane);
1694 }
1695}
1696
Keith Packard42f52ef2008-10-18 19:39:29 -07001697/* Called from drm generic code, passed 'crtc' which
1698 * we use as a pipe index
1699 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001700static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001701{
1702 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07001703 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08001704
Chris Wilson5eddb702010-09-11 13:48:45 +01001705 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08001706 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001707
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001708 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07001709 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08001710 i915_enable_pipestat(dev_priv, pipe,
1711 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Keith Packarde9d21d72008-10-16 11:31:38 -07001712 else
Keith Packard7c463582008-11-04 02:03:27 -08001713 i915_enable_pipestat(dev_priv, pipe,
1714 PIPE_VBLANK_INTERRUPT_ENABLE);
Chris Wilson8692d00e2011-02-05 10:08:21 +00001715
1716 /* maintain vblank delivery even in deep C-states */
1717 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02001718 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001719 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00001720
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001721 return 0;
1722}
1723
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001724static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07001725{
1726 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1727 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03001728 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1729 DE_PIPE_VBLANK_ILK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07001730
1731 if (!i915_pipe_enabled(dev, pipe))
1732 return -EINVAL;
1733
1734 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03001735 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001736 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1737
1738 return 0;
1739}
1740
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001741static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1742{
1743 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1744 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001745 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001746
1747 if (!i915_pipe_enabled(dev, pipe))
1748 return -EINVAL;
1749
1750 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001751 imr = I915_READ(VLV_IMR);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001752 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001753 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001754 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001755 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001756 I915_WRITE(VLV_IMR, imr);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001757 i915_enable_pipestat(dev_priv, pipe,
1758 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001759 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1760
1761 return 0;
1762}
1763
Keith Packard42f52ef2008-10-18 19:39:29 -07001764/* Called from drm generic code, passed 'crtc' which
1765 * we use as a pipe index
1766 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001767static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001768{
1769 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07001770 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001771
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001772 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00001773 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02001774 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00001775
Jesse Barnesf796cf82011-04-07 13:58:17 -07001776 i915_disable_pipestat(dev_priv, pipe,
1777 PIPE_VBLANK_INTERRUPT_ENABLE |
1778 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1779 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1780}
1781
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001782static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07001783{
1784 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1785 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03001786 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1787 DE_PIPE_VBLANK_ILK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07001788
1789 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03001790 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001791 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1792}
1793
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001794static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1795{
1796 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1797 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001798 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001799
1800 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001801 i915_disable_pipestat(dev_priv, pipe,
1802 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001803 imr = I915_READ(VLV_IMR);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001804 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001805 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001806 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001807 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001808 I915_WRITE(VLV_IMR, imr);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001809 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1810}
1811
Chris Wilson893eead2010-10-27 14:44:35 +01001812static u32
1813ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08001814{
Chris Wilson893eead2010-10-27 14:44:35 +01001815 return list_entry(ring->request_list.prev,
1816 struct drm_i915_gem_request, list)->seqno;
1817}
1818
Chris Wilson9107e9d2013-06-10 11:20:20 +01001819static bool
1820ring_idle(struct intel_ring_buffer *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01001821{
Chris Wilson9107e9d2013-06-10 11:20:20 +01001822 return (list_empty(&ring->request_list) ||
1823 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04001824}
1825
Chris Wilson6274f212013-06-10 11:20:21 +01001826static struct intel_ring_buffer *
1827semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02001828{
1829 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilson6274f212013-06-10 11:20:21 +01001830 u32 cmd, ipehr, acthd, acthd_min;
Chris Wilsona24a11e2013-03-14 17:52:05 +02001831
1832 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1833 if ((ipehr & ~(0x3 << 16)) !=
1834 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
Chris Wilson6274f212013-06-10 11:20:21 +01001835 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02001836
1837 /* ACTHD is likely pointing to the dword after the actual command,
1838 * so scan backwards until we find the MBOX.
1839 */
Chris Wilson6274f212013-06-10 11:20:21 +01001840 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
Chris Wilsona24a11e2013-03-14 17:52:05 +02001841 acthd_min = max((int)acthd - 3 * 4, 0);
1842 do {
1843 cmd = ioread32(ring->virtual_start + acthd);
1844 if (cmd == ipehr)
1845 break;
1846
1847 acthd -= 4;
1848 if (acthd < acthd_min)
Chris Wilson6274f212013-06-10 11:20:21 +01001849 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02001850 } while (1);
1851
Chris Wilson6274f212013-06-10 11:20:21 +01001852 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
1853 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
Chris Wilsona24a11e2013-03-14 17:52:05 +02001854}
1855
Chris Wilson6274f212013-06-10 11:20:21 +01001856static int semaphore_passed(struct intel_ring_buffer *ring)
1857{
1858 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1859 struct intel_ring_buffer *signaller;
1860 u32 seqno, ctl;
1861
1862 ring->hangcheck.deadlock = true;
1863
1864 signaller = semaphore_waits_for(ring, &seqno);
1865 if (signaller == NULL || signaller->hangcheck.deadlock)
1866 return -1;
1867
1868 /* cursory check for an unkickable deadlock */
1869 ctl = I915_READ_CTL(signaller);
1870 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
1871 return -1;
1872
1873 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
1874}
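/*
 * Editorial sketch, not part of the driver: semaphore_passed() above detects
 * a ring-to-ring wait cycle by marking every ring it visits
 * (hangcheck.deadlock = true) before following the "waits for" edge; reaching
 * an already-marked ring means the chain loops back on itself.  The helper
 * below demonstrates the same marking scheme on a toy waits_for[] array,
 * where waits_for[i] is the ring that ring i waits on, or -1 if it is idle.
 */
#if 0	/* standalone example, never compiled into the driver */
#define EXAMPLE_NUM_RINGS 3

static int example_wait_chain_deadlocks(const int waits_for[EXAMPLE_NUM_RINGS],
					int start)
{
	int visited[EXAMPLE_NUM_RINGS] = { 0 };
	int cur = start;

	while (cur >= 0) {
		if (visited[cur])
			return 1;	/* revisited a ring: the chain is a cycle */
		visited[cur] = 1;
		cur = waits_for[cur];
	}

	return 0;			/* chain terminated: no deadlock */
}
#endif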
1875
1876static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
1877{
1878 struct intel_ring_buffer *ring;
1879 int i;
1880
1881 for_each_ring(ring, dev_priv, i)
1882 ring->hangcheck.deadlock = false;
1883}
1884
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03001885static enum intel_ring_hangcheck_action
1886ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001887{
1888 struct drm_device *dev = ring->dev;
1889 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001890 u32 tmp;
1891
Chris Wilson6274f212013-06-10 11:20:21 +01001892 if (ring->hangcheck.acthd != acthd)
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001893 return HANGCHECK_ACTIVE;
Chris Wilson6274f212013-06-10 11:20:21 +01001894
Chris Wilson9107e9d2013-06-10 11:20:20 +01001895 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001896 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001897
1898 /* Is the chip hanging on a WAIT_FOR_EVENT?
1899 * If so we can simply poke the RB_WAIT bit
1900 * and break the hang. This should work on
1901 * all but the second generation chipsets.
1902 */
1903 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001904 if (tmp & RING_WAIT) {
1905 DRM_ERROR("Kicking stuck wait on %s\n",
1906 ring->name);
1907 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001908 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001909 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02001910
Chris Wilson6274f212013-06-10 11:20:21 +01001911 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
1912 switch (semaphore_passed(ring)) {
1913 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001914 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01001915 case 1:
1916 DRM_ERROR("Kicking stuck semaphore on %s\n",
1917 ring->name);
1918 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001919 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01001920 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001921 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01001922 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01001923 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03001924
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001925 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03001926}
1927
Ben Gamarif65d9422009-09-14 17:48:44 -04001928/**
1929 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001930 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
1931 * if there is no progress, the hangcheck score for that ring is increased.
1932 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
1933 * we kick the ring. If we see no progress on three subsequent calls
1934 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04001935 */
Damien Lespiaua658b5d2013-08-08 22:28:56 +01001936static void i915_hangcheck_elapsed(unsigned long data)
Ben Gamarif65d9422009-09-14 17:48:44 -04001937{
1938 struct drm_device *dev = (struct drm_device *)data;
1939 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01001940 struct intel_ring_buffer *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01001941 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001942 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001943 bool stuck[I915_NUM_RINGS] = { 0 };
1944#define BUSY 1
1945#define KICK 5
1946#define HUNG 20
1947#define FIRE 30
Chris Wilson893eead2010-10-27 14:44:35 +01001948
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001949 if (!i915_enable_hangcheck)
1950 return;
1951
Chris Wilsonb4519512012-05-11 14:29:30 +01001952 for_each_ring(ring, dev_priv, i) {
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001953 u32 seqno, acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001954 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01001955
Chris Wilson6274f212013-06-10 11:20:21 +01001956 semaphore_clear_deadlocks(dev_priv);
1957
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001958 seqno = ring->get_seqno(ring, false);
1959 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01001960
Chris Wilson9107e9d2013-06-10 11:20:20 +01001961 if (ring->hangcheck.seqno == seqno) {
1962 if (ring_idle(ring, seqno)) {
1963 if (waitqueue_active(&ring->irq_queue)) {
1964 /* Issue a wake-up to catch stuck h/w. */
1965 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1966 ring->name);
1967 wake_up_all(&ring->irq_queue);
1968 ring->hangcheck.score += HUNG;
1969 } else
1970 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001971 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01001972 /* We always increment the hangcheck score
1973 * if the ring is busy and still processing
1974 * the same request, so that no single request
1975 * can run indefinitely (such as a chain of
1976 * batches). The only time we do not increment
1977 * the hangcheck score on this ring is if this
1978 * ring is in a legitimate wait for another
1979 * ring. In that case the waiting ring is a
1980 * victim and we want to be sure we catch the
1981 * right culprit. Then every time we do kick
1982 * the ring, add a small increment to the
1983 * score so that we can catch a batch that is
1984 * being repeatedly kicked and so responsible
1985 * for stalling the machine.
1986 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03001987 ring->hangcheck.action = ring_stuck(ring,
1988 acthd);
1989
1990 switch (ring->hangcheck.action) {
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001991 case HANGCHECK_WAIT:
Chris Wilson6274f212013-06-10 11:20:21 +01001992 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001993 case HANGCHECK_ACTIVE:
Jani Nikulaea04cb32013-08-11 12:44:02 +03001994 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01001995 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001996 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03001997 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01001998 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001999 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03002000 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01002001 stuck[i] = true;
2002 break;
2003 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002004 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002005 } else {
2006 /* Gradually reduce the count so that we catch DoS
2007 * attempts across multiple batches.
2008 */
2009 if (ring->hangcheck.score > 0)
2010 ring->hangcheck.score--;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002011 }
2012
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002013 ring->hangcheck.seqno = seqno;
2014 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002015 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002016 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002017
Mika Kuoppala92cab732013-05-24 17:16:07 +03002018 for_each_ring(ring, dev_priv, i) {
Chris Wilson9107e9d2013-06-10 11:20:20 +01002019 if (ring->hangcheck.score > FIRE) {
Ben Widawskyacd78c12013-06-13 21:33:33 -07002020 DRM_ERROR("%s on %s\n",
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002021 stuck[i] ? "stuck" : "no progress",
Chris Wilsona43adf02013-06-10 11:20:22 +01002022 ring->name);
2023 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002024 }
2025 }
2026
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002027 if (rings_hung)
2028 return i915_handle_error(dev, true);
Ben Gamarif65d9422009-09-14 17:48:44 -04002029
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002030 if (busy_count)
2031 /* Reset timer case chip hangs without another request
2032 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002033 i915_queue_hangcheck(dev);
2034}
2035
2036void i915_queue_hangcheck(struct drm_device *dev)
2037{
2038 struct drm_i915_private *dev_priv = dev->dev_private;
2039 if (!i915_enable_hangcheck)
2040 return;
2041
2042 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2043 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002044}
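/*
 * Editorial sketch, not part of the driver: i915_hangcheck_elapsed() above
 * accumulates a per-ring score (the local BUSY/KICK/HUNG increments) whenever
 * a ring shows no seqno progress, lets the score decay by one when progress
 * is seen, and only declares the GPU hung once a score passes FIRE.  The
 * helper below is a simplified, standalone model of that accumulate-and-decay
 * behaviour; a caller would treat "score > FIRE" as the trigger for
 * i915_handle_error().
 */
#if 0	/* standalone example, never compiled into the driver */
static int example_update_score(int score, int made_progress, int penalty)
{
	if (made_progress)
		return score > 0 ? score - 1 : 0;	/* gradual decay */

	return score + penalty;		/* BUSY, KICK or HUNG depending on state */
}
#endif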
2045
Paulo Zanoni91738a92013-06-05 14:21:51 -03002046static void ibx_irq_preinstall(struct drm_device *dev)
2047{
2048 struct drm_i915_private *dev_priv = dev->dev_private;
2049
2050 if (HAS_PCH_NOP(dev))
2051 return;
2052
2053 /* south display irq */
2054 I915_WRITE(SDEIMR, 0xffffffff);
2055 /*
2056 * SDEIER is also touched by the interrupt handler to work around missed
2057 * PCH interrupts. Hence we can't update it after the interrupt handler
2058 * is enabled - instead we unconditionally enable all PCH interrupt
2059 * sources here, but then only unmask them as needed with SDEIMR.
2060 */
2061 I915_WRITE(SDEIER, 0xffffffff);
2062 POSTING_READ(SDEIER);
2063}
2064
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002065static void gen5_gt_irq_preinstall(struct drm_device *dev)
2066{
2067 struct drm_i915_private *dev_priv = dev->dev_private;
2068
2069 /* and GT */
2070 I915_WRITE(GTIMR, 0xffffffff);
2071 I915_WRITE(GTIER, 0x0);
2072 POSTING_READ(GTIER);
2073
2074 if (INTEL_INFO(dev)->gen >= 6) {
2075 /* and PM */
2076 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2077 I915_WRITE(GEN6_PMIER, 0x0);
2078 POSTING_READ(GEN6_PMIER);
2079 }
2080}
2081
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082/* drm_dma.h hooks
2083*/
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002084static void ironlake_irq_preinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002085{
2086 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2087
Jesse Barnes46979952011-04-07 13:53:55 -07002088 atomic_set(&dev_priv->irq_received, 0);
2089
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002090 I915_WRITE(HWSTAM, 0xeffe);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002091
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002092 I915_WRITE(DEIMR, 0xffffffff);
2093 I915_WRITE(DEIER, 0x0);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002094 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002095
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002096 gen5_gt_irq_preinstall(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002097
Paulo Zanoni91738a92013-06-05 14:21:51 -03002098 ibx_irq_preinstall(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002099}
2100
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002101static void valleyview_irq_preinstall(struct drm_device *dev)
2102{
2103 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2104 int pipe;
2105
2106 atomic_set(&dev_priv->irq_received, 0);
2107
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002108 /* VLV magic */
2109 I915_WRITE(VLV_IMR, 0);
2110 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2111 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2112 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2113
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002114 /* and GT */
2115 I915_WRITE(GTIIR, I915_READ(GTIIR));
2116 I915_WRITE(GTIIR, I915_READ(GTIIR));
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002117
2118 gen5_gt_irq_preinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002119
2120 I915_WRITE(DPINVGTT, 0xff);
2121
2122 I915_WRITE(PORT_HOTPLUG_EN, 0);
2123 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2124 for_each_pipe(pipe)
2125 I915_WRITE(PIPESTAT(pipe), 0xffff);
2126 I915_WRITE(VLV_IIR, 0xffffffff);
2127 I915_WRITE(VLV_IMR, 0xffffffff);
2128 I915_WRITE(VLV_IER, 0x0);
2129 POSTING_READ(VLV_IER);
2130}
2131
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002132static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002133{
2134 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002135 struct drm_mode_config *mode_config = &dev->mode_config;
2136 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02002137 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07002138
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002139 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002140 hotplug_irqs = SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002141 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002142 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002143 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002144 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002145 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002146 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002147 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002148 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002149 }
2150
Daniel Vetterfee884e2013-07-04 23:35:21 +02002151 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002152
2153 /*
2154 * Enable digital hotplug on the PCH, and configure the DP short pulse
2155 * duration to 2ms (which is the minimum in the Display Port spec)
2156 *
2157 * This register is the same on all known PCH chips.
2158 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002159 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2160 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2161 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2162 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2163 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2164 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2165}
2166
Paulo Zanonid46da432013-02-08 17:35:15 -02002167static void ibx_irq_postinstall(struct drm_device *dev)
2168{
2169 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002170 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02002171
Daniel Vetter692a04c2013-05-29 21:43:05 +02002172 if (HAS_PCH_NOP(dev))
2173 return;
2174
Paulo Zanoni86642812013-04-12 17:57:57 -03002175 if (HAS_PCH_IBX(dev)) {
2176 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
Paulo Zanonide032bf2013-04-12 17:57:58 -03002177 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
Paulo Zanoni86642812013-04-12 17:57:57 -03002178 } else {
2179 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2180
2181 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2182 }
Ben Widawskyab5c6082013-04-05 13:12:41 -07002183
Paulo Zanonid46da432013-02-08 17:35:15 -02002184 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2185 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02002186}
2187
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002188static void gen5_gt_irq_postinstall(struct drm_device *dev)
2189{
2190 struct drm_i915_private *dev_priv = dev->dev_private;
2191 u32 pm_irqs, gt_irqs;
2192
2193 pm_irqs = gt_irqs = 0;
2194
2195 dev_priv->gt_irq_mask = ~0;
2196 if (HAS_L3_GPU_CACHE(dev)) {
2197 /* L3 parity interrupt is always unmasked. */
2198 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2199 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2200 }
2201
2202 gt_irqs |= GT_RENDER_USER_INTERRUPT;
2203 if (IS_GEN5(dev)) {
2204 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2205 ILK_BSD_USER_INTERRUPT;
2206 } else {
2207 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2208 }
2209
2210 I915_WRITE(GTIIR, I915_READ(GTIIR));
2211 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2212 I915_WRITE(GTIER, gt_irqs);
2213 POSTING_READ(GTIER);
2214
2215 if (INTEL_INFO(dev)->gen >= 6) {
2216 pm_irqs |= GEN6_PM_RPS_EVENTS;
2217
2218 if (HAS_VEBOX(dev))
2219 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2220
Paulo Zanoni605cd252013-08-06 18:57:15 -03002221 dev_priv->pm_irq_mask = 0xffffffff;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002222 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
Paulo Zanoni605cd252013-08-06 18:57:15 -03002223 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002224 I915_WRITE(GEN6_PMIER, pm_irqs);
2225 POSTING_READ(GEN6_PMIER);
2226 }
2227}
2228
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002229static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002230{
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002231 unsigned long irqflags;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002232 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002233 u32 display_mask, extra_mask;
2234
2235 if (INTEL_INFO(dev)->gen >= 7) {
2236 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2237 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2238 DE_PLANEB_FLIP_DONE_IVB |
2239 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2240 DE_ERR_INT_IVB);
2241 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2242 DE_PIPEA_VBLANK_IVB);
2243
2244 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2245 } else {
2246 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2247 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2248 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2249 DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2250 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2251 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002252
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002253 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002254
2255 /* should always be able to generate an irq */
2256 I915_WRITE(DEIIR, I915_READ(DEIIR));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002257 I915_WRITE(DEIMR, dev_priv->irq_mask);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002258 I915_WRITE(DEIER, display_mask | extra_mask);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002259 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002260
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002261 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002262
Paulo Zanonid46da432013-02-08 17:35:15 -02002263 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07002264
Jesse Barnesf97108d2010-01-29 11:27:07 -08002265 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02002266 /* Enable PCU event interrupts
2267 *
2268 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002269 * setup is guaranteed to run in single-threaded context. But we
2270 * need it to make the assert_spin_locked happy. */
2271 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002272 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002273 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002274 }
2275
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002276 return 0;
2277}
2278
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002279static int valleyview_irq_postinstall(struct drm_device *dev)
2280{
2281 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002282 u32 enable_mask;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002283 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
Daniel Vetterb79480b2013-06-27 17:52:10 +02002284 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002285
2286 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002287 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2288 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2289 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002290 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2291
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002292 /*
2293 * Leave vblank interrupts masked initially. enable/disable will
2294 * toggle them based on usage.
2295 */
2296 dev_priv->irq_mask = (~enable_mask) |
2297 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2298 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002299
Daniel Vetter20afbda2012-12-11 14:05:07 +01002300 I915_WRITE(PORT_HOTPLUG_EN, 0);
2301 POSTING_READ(PORT_HOTPLUG_EN);
2302
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002303 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2304 I915_WRITE(VLV_IER, enable_mask);
2305 I915_WRITE(VLV_IIR, 0xffffffff);
2306 I915_WRITE(PIPESTAT(0), 0xffff);
2307 I915_WRITE(PIPESTAT(1), 0xffff);
2308 POSTING_READ(VLV_IER);
2309
Daniel Vetterb79480b2013-06-27 17:52:10 +02002310 /* Interrupt setup is already guaranteed to be single-threaded, this is
2311 * just to make the assert_spin_locked check happy. */
2312 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002313 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01002314 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002315 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
Daniel Vetterb79480b2013-06-27 17:52:10 +02002316 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002317
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002318 I915_WRITE(VLV_IIR, 0xffffffff);
2319 I915_WRITE(VLV_IIR, 0xffffffff);
2320
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002321 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002322
2323 /* ack & enable invalid PTE error interrupts */
2324#if 0 /* FIXME: add support to irq handler for checking these bits */
2325 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2326 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2327#endif
2328
2329 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002330
2331 return 0;
2332}
2333
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002334static void valleyview_irq_uninstall(struct drm_device *dev)
2335{
2336 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2337 int pipe;
2338
2339 if (!dev_priv)
2340 return;
2341
Egbert Eichac4c16c2013-04-16 13:36:58 +02002342 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2343
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002344 for_each_pipe(pipe)
2345 I915_WRITE(PIPESTAT(pipe), 0xffff);
2346
2347 I915_WRITE(HWSTAM, 0xffffffff);
2348 I915_WRITE(PORT_HOTPLUG_EN, 0);
2349 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2350 for_each_pipe(pipe)
2351 I915_WRITE(PIPESTAT(pipe), 0xffff);
2352 I915_WRITE(VLV_IIR, 0xffffffff);
2353 I915_WRITE(VLV_IMR, 0xffffffff);
2354 I915_WRITE(VLV_IER, 0x0);
2355 POSTING_READ(VLV_IER);
2356}
2357
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002358static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002359{
2360 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07002361
2362 if (!dev_priv)
2363 return;
2364
Egbert Eichac4c16c2013-04-16 13:36:58 +02002365 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2366
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002367 I915_WRITE(HWSTAM, 0xffffffff);
2368
2369 I915_WRITE(DEIMR, 0xffffffff);
2370 I915_WRITE(DEIER, 0x0);
2371 I915_WRITE(DEIIR, I915_READ(DEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002372 if (IS_GEN7(dev))
2373 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002374
2375 I915_WRITE(GTIMR, 0xffffffff);
2376 I915_WRITE(GTIER, 0x0);
2377 I915_WRITE(GTIIR, I915_READ(GTIIR));
Keith Packard192aac1f2011-09-20 10:12:44 -07002378
Ben Widawskyab5c6082013-04-05 13:12:41 -07002379 if (HAS_PCH_NOP(dev))
2380 return;
2381
Keith Packard192aac1f2011-09-20 10:12:44 -07002382 I915_WRITE(SDEIMR, 0xffffffff);
2383 I915_WRITE(SDEIER, 0x0);
2384 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002385 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2386 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002387}
2388
Chris Wilsonc2798b12012-04-22 21:13:57 +01002389static void i8xx_irq_preinstall(struct drm_device * dev)
2390{
2391 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2392 int pipe;
2393
2394 atomic_set(&dev_priv->irq_received, 0);
2395
2396 for_each_pipe(pipe)
2397 I915_WRITE(PIPESTAT(pipe), 0);
2398 I915_WRITE16(IMR, 0xffff);
2399 I915_WRITE16(IER, 0x0);
2400 POSTING_READ16(IER);
2401}
2402
2403static int i8xx_irq_postinstall(struct drm_device *dev)
2404{
2405 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2406
Chris Wilsonc2798b12012-04-22 21:13:57 +01002407 I915_WRITE16(EMR,
2408 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2409
2410 /* Unmask the interrupts that we always want on. */
2411 dev_priv->irq_mask =
2412 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2413 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2414 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2415 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2416 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2417 I915_WRITE16(IMR, dev_priv->irq_mask);
2418
2419 I915_WRITE16(IER,
2420 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2421 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2422 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2423 I915_USER_INTERRUPT);
2424 POSTING_READ16(IER);
2425
2426 return 0;
2427}
2428
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002429/*
2430 * Returns true when a page flip has completed.
2431 */
2432static bool i8xx_handle_vblank(struct drm_device *dev,
2433 int pipe, u16 iir)
2434{
2435 drm_i915_private_t *dev_priv = dev->dev_private;
2436 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2437
2438 if (!drm_handle_vblank(dev, pipe))
2439 return false;
2440
2441 if ((iir & flip_pending) == 0)
2442 return false;
2443
2444 intel_prepare_page_flip(dev, pipe);
2445
2446 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2447 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2448 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2449 * the flip is completed (no longer pending). Since this doesn't raise
2450 * an interrupt per se, we watch for the change at vblank.
2451 */
2452 if (I915_READ16(ISR) & flip_pending)
2453 return false;
2454
2455 intel_finish_page_flip(dev, pipe);
2456
2457 return true;
2458}
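/*
 * Editorial sketch, not part of the driver: i8xx_handle_vblank() above infers
 * "flip completed" from two registers -- the sticky IIR bit says a flip was
 * queued, while the live ISR bit drops once the hardware has latched the new
 * surface.  The helper below restates that predicate with plain arguments in
 * place of the IIR and ISR reads.
 */
#if 0	/* standalone example, never compiled into the driver */
static int example_flip_completed(unsigned int iir, unsigned int isr,
				  unsigned int flip_pending_bit)
{
	/* Queued (still visible in IIR) and no longer pending (clear in ISR). */
	return (iir & flip_pending_bit) && !(isr & flip_pending_bit);
}
#endif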
2459
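/* Gen2 (i8xx) interrupt handler: loop on the 16-bit IIR, clearing pipe status
 * before IIR, notifying the render ring on user interrupts and completing
 * pending page flips at vblank via i8xx_handle_vblank(). */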
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

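/* Gen2 (i8xx): disable and acknowledge everything on teardown. */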
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

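/* Gen3 (i915): quiesce hotplug, pipe status and the main interrupt registers
 * before the handler is installed. */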
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

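/* Gen3 (i915): program the error mask, unmask the always-wanted interrupts
 * and, when the platform has hotplug support, enable the display port
 * interrupt as well. */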
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

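/* Gen3 (i915) interrupt handler: same structure as the gen2 handler but with
 * 32-bit registers, hotplug handling and plane/pipe swapping on mobile parts. */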
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

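/* Gen3 (i915): tear down hotplug, pipe status and the main interrupt
 * registers, mirroring the preinstall path. */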
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

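/* Gen4 (i965/g4x): quiesce hotplug, pipe status and the main interrupt
 * registers before the handler is installed. */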
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

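/* Gen4 (i965/g4x): unmask the always-wanted interrupts, enable GMBUS pipe
 * status events under the irq_lock and program the error mask (G4X has extra
 * error bits). */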
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

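/* Pre-PCH hotplug setup: called with the irq_lock held; enables hotplug
 * detection only for pins currently marked HPD_ENABLED and programs the CRT
 * detection parameters once. */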
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

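/* Gen4 (i965/g4x) interrupt handler: loops until no unmasked IIR bits remain,
 * handling pipe events, hotplug, GMBUS and the render (RCS) ring, plus the
 * BSD (VCS) ring on G4X. */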
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

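/* Gen4 (i965/g4x): teardown mirror of the preinstall/postinstall paths. */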
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

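/* Timer callback that re-enables hotplug detection on pins that were marked
 * HPD_DISABLED (e.g. after an interrupt storm), restoring each connector's
 * polling mode and reprogramming the hotplug hardware under the irq_lock. */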
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

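/* One-time IRQ setup: initializes the work items and timers and selects the
 * per-generation preinstall/postinstall/handler/uninstall, vblank and hotplug
 * hooks. */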
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

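/* (Re-)initialize hotplug detection state for all pins and connectors, then
 * let the platform hook program the hardware under the irq_lock. */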
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}