/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

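/*
 * The hpd_* tables above are indexed by enum hpd_pin.  In this file
 * intel_hpd_irq_handler() walks such a table to match each pin's
 * per-platform hotplug bit against the trigger bits it is handed:
 * SDEIIR bits for the ibx/cpt tables, PORT_HOTPLUG_STAT bits for the
 * i915/VLV table.
 */
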
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

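/*
 * DEIMR semantics: a set bit in the mask register blocks the corresponding
 * display interrupt, so "enable" clears bits in dev_priv->irq_mask and
 * "disable" sets them.  Both helpers expect dev_priv->irq_lock to be held,
 * e.g.:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	ironlake_enable_display_irq(dev_priv, DE_PCH_EVENT);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */
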
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

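/*
 * GTIMR follows the same convention: bits set in dev_priv->gt_irq_mask are
 * masked off.  ilk_update_gt_irq() clears every bit in interrupt_mask and
 * then re-sets those not present in enabled_irq_mask, so
 * ilk_enable_gt_irq(dev_priv, bits) unmasks and ilk_disable_gt_irq(dev_priv,
 * bits) masks.  Both must be called under dev_priv->irq_lock, as done in
 * ivybridge_parity_work() below.
 */
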
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

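/*
 * The two macros above cover the common cases of
 * ibx_display_interrupt_update(): passing (bits, bits) unmasks the given
 * SDEIMR bits, passing (bits, 0) masks them again.
 */
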
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

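/*
 * Typical usage in the error interrupt handlers further down: on the first
 * underrun, report the error once and turn further reporting off so a storm
 * of underruns cannot flood the log, e.g.
 *
 *	if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
 *		DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
 *
 * Since the return value is the previous reporting state, the message is
 * only printed the first time reporting gets disabled.
 */
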
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

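/*
 * PIPESTAT packs interrupt enable bits in the high half of the register and
 * the matching status bits sixteen positions lower, which is why enabling an
 * interrupt also writes (mask >> 16) to clear any stale status.  The readout
 * is masked with 0x7fff0000 so that pending status bits, which are
 * write-1-to-clear, are not cleared as a side effect of the write-back.
 */
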
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

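/*
 * Here the hardware frame counter is split across two registers: PIPEFRAME
 * holds the high 16 bits and the top byte of PIPEFRAMEPIXEL holds the low
 * 8 bits, hence the (high1 << 8) | low combination above producing a 24 bit
 * counter.
 */
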
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
				 u32 pm_iir)
{
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * catches a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

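/*
 * Deferral pattern used above: the interrupt handler only stashes the RPS
 * bits in dev_priv->rps.pm_iir and masks them in GEN6_PMIMR;
 * gen6_pm_rps_work() later picks the bits up under irq_lock, unmasks
 * GEN6_PMIMR again and performs the actual frequency change under
 * rps.hw_lock.
 */
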
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}

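/*
 * Storm detection above: each HPD pin keeps a counter that is reset whenever
 * more than HPD_STORM_DETECT_PERIOD ms have passed since the last interrupt.
 * If more than HPD_STORM_THRESHOLD interrupts arrive inside that window the
 * pin is marked HPD_MARK_DISABLED, hpd_irq_setup() reprograms the hardware,
 * and i915_hotplug_work_func() later switches the connector over to polling.
 */
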
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

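/*
 * GMBUS and DP AUX completions share dev_priv->gmbus_wait_queue, so both
 * handlers above simply wake every waiter; the waiting code re-checks its
 * own completion condition.
 */
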
/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
		i915_handle_error(dev_priv->dev, false);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

Paulo Zanonic008bc62013-07-12 16:35:10 -03001244static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1245{
1246 struct drm_i915_private *dev_priv = dev->dev_private;
1247
1248 if (de_iir & DE_AUX_CHANNEL_A)
1249 dp_aux_irq_handler(dev);
1250
1251 if (de_iir & DE_GSE)
1252 intel_opregion_asle_intr(dev);
1253
1254 if (de_iir & DE_PIPEA_VBLANK)
1255 drm_handle_vblank(dev, 0);
1256
1257 if (de_iir & DE_PIPEB_VBLANK)
1258 drm_handle_vblank(dev, 1);
1259
1260 if (de_iir & DE_POISON)
1261 DRM_ERROR("Poison interrupt\n");
1262
1263 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1264 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1265 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1266
1267 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1268 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1269 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1270
1271 if (de_iir & DE_PLANEA_FLIP_DONE) {
1272 intel_prepare_page_flip(dev, 0);
1273 intel_finish_page_flip_plane(dev, 0);
1274 }
1275
1276 if (de_iir & DE_PLANEB_FLIP_DONE) {
1277 intel_prepare_page_flip(dev, 1);
1278 intel_finish_page_flip_plane(dev, 1);
1279 }
1280
1281 /* check event from PCH */
1282 if (de_iir & DE_PCH_EVENT) {
1283 u32 pch_iir = I915_READ(SDEIIR);
1284
1285 if (HAS_PCH_CPT(dev))
1286 cpt_irq_handler(dev, pch_iir);
1287 else
1288 ibx_irq_handler(dev, pch_iir);
1289
1290		/* should clear PCH hotplug event before clearing CPU irq */
1291 I915_WRITE(SDEIIR, pch_iir);
1292 }
1293
1294 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1295 ironlake_rps_change_irq_handler(dev);
1296}
1297
Paulo Zanoni9719fb92013-07-12 16:35:11 -03001298static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1299{
1300 struct drm_i915_private *dev_priv = dev->dev_private;
1301 int i;
1302
1303 if (de_iir & DE_ERR_INT_IVB)
1304 ivb_err_int_handler(dev);
1305
1306 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1307 dp_aux_irq_handler(dev);
1308
1309 if (de_iir & DE_GSE_IVB)
1310 intel_opregion_asle_intr(dev);
1311
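	/* The per-pipe vblank and flip-done DE bits on IVB+ are spaced 5 bits
	 * apart, hence the 5 * i shift to pick out pipe i. */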
1312 for (i = 0; i < 3; i++) {
1313 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1314 drm_handle_vblank(dev, i);
1315 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1316 intel_prepare_page_flip(dev, i);
1317 intel_finish_page_flip_plane(dev, i);
1318 }
1319 }
1320
1321 /* check event from PCH */
1322 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1323 u32 pch_iir = I915_READ(SDEIIR);
1324
1325 cpt_irq_handler(dev, pch_iir);
1326
1327		/* clear PCH hotplug event before clearing CPU irq */
1328 I915_WRITE(SDEIIR, pch_iir);
1329 }
1330}
1331
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001332static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001333{
1334 struct drm_device *dev = (struct drm_device *) arg;
1335 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001336 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01001337 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001338
1339 atomic_inc(&dev_priv->irq_received);
1340
Paulo Zanoni86642812013-04-12 17:57:57 -03001341 /* We get interrupts on unclaimed registers, so check for this before we
1342 * do any I915_{READ,WRITE}. */
Chris Wilson907b28c2013-07-19 20:36:52 +01001343 intel_uncore_check_errors(dev);
Paulo Zanoni86642812013-04-12 17:57:57 -03001344
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001345 /* disable master interrupt before clearing iir */
1346 de_ier = I915_READ(DEIER);
1347 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03001348 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01001349
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001350 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1351	 * interrupts will be stored on its back queue, and then we'll be
1352 * able to process them after we restore SDEIER (as soon as we restore
1353 * it, we'll get an interrupt if SDEIIR still has something to process
1354 * due to its back queue). */
Ben Widawskyab5c6082013-04-05 13:12:41 -07001355 if (!HAS_PCH_NOP(dev)) {
1356 sde_ier = I915_READ(SDEIER);
1357 I915_WRITE(SDEIER, 0);
1358 POSTING_READ(SDEIER);
1359 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001360
Paulo Zanoni86642812013-04-12 17:57:57 -03001361 /* On Haswell, also mask ERR_INT because we don't want to risk
1362 * generating "unclaimed register" interrupts from inside the interrupt
1363 * handler. */
Daniel Vetter4bc9d432013-06-27 13:44:58 +02001364 if (IS_HASWELL(dev)) {
1365 spin_lock(&dev_priv->irq_lock);
Paulo Zanoni86642812013-04-12 17:57:57 -03001366 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02001367 spin_unlock(&dev_priv->irq_lock);
1368 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001369
Chris Wilson0e434062012-05-09 21:45:44 +01001370 gt_iir = I915_READ(GTIIR);
1371 if (gt_iir) {
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001372 if (INTEL_INFO(dev)->gen >= 6)
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001373 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03001374 else
1375 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001376 I915_WRITE(GTIIR, gt_iir);
1377 ret = IRQ_HANDLED;
1378 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001379
1380 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01001381 if (de_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001382 if (INTEL_INFO(dev)->gen >= 7)
1383 ivb_display_irq_handler(dev, de_iir);
1384 else
1385 ilk_display_irq_handler(dev, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01001386 I915_WRITE(DEIIR, de_iir);
1387 ret = IRQ_HANDLED;
1388 }
1389
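	/* PM interrupts (GEN6_PMIIR: RPS and, on HSW, VEBOX events) only
	 * exist on gen6+. */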
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001390 if (INTEL_INFO(dev)->gen >= 6) {
1391 u32 pm_iir = I915_READ(GEN6_PMIIR);
1392 if (pm_iir) {
1393 if (IS_HASWELL(dev))
1394 hsw_pm_irq_handler(dev_priv, pm_iir);
1395 else if (pm_iir & GEN6_PM_RPS_EVENTS)
1396 gen6_rps_irq_handler(dev_priv, pm_iir);
1397 I915_WRITE(GEN6_PMIIR, pm_iir);
1398 ret = IRQ_HANDLED;
1399 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001400 }
1401
Daniel Vetter4bc9d432013-06-27 13:44:58 +02001402 if (IS_HASWELL(dev)) {
1403 spin_lock(&dev_priv->irq_lock);
1404 if (ivb_can_enable_err_int(dev))
1405 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1406 spin_unlock(&dev_priv->irq_lock);
1407 }
Paulo Zanoni86642812013-04-12 17:57:57 -03001408
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001409 I915_WRITE(DEIER, de_ier);
1410 POSTING_READ(DEIER);
Ben Widawskyab5c6082013-04-05 13:12:41 -07001411 if (!HAS_PCH_NOP(dev)) {
1412 I915_WRITE(SDEIER, sde_ier);
1413 POSTING_READ(SDEIER);
1414 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001415
1416 return ret;
1417}
1418
Jesse Barnes8a905232009-07-11 16:48:03 -04001419/**
1420 * i915_error_work_func - do process context error handling work
1421 * @work: work struct
1422 *
1423 * Fire an error uevent so userspace can see that a hang or error
1424 * was detected.
1425 */
1426static void i915_error_work_func(struct work_struct *work)
1427{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001428 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1429 work);
1430 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1431 gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04001432 struct drm_device *dev = dev_priv->dev;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001433 struct intel_ring_buffer *ring;
Ben Widawskycce723e2013-07-19 09:16:42 -07001434 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1435 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1436 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Daniel Vetterf69061b2012-12-06 09:01:42 +01001437 int i, ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04001438
Ben Gamarif316a422009-09-14 17:48:46 -04001439 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04001440
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001441 /*
1442 * Note that there's only one work item which does gpu resets, so we
1443 * need not worry about concurrent gpu resets potentially incrementing
1444 * error->reset_counter twice. We only need to take care of another
1445 * racing irq/hangcheck declaring the gpu dead for a second time. A
1446 * quick check for that is good enough: schedule_work ensures the
1447 * correct ordering between hang detection and this work item, and since
1448 * the reset in-progress bit is only ever set by code outside of this
1449 * work we don't need to worry about any other races.
1450 */
1451 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01001452 DRM_DEBUG_DRIVER("resetting chip\n");
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001453 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1454 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001455
Daniel Vetterf69061b2012-12-06 09:01:42 +01001456 ret = i915_reset(dev);
1457
1458 if (ret == 0) {
1459 /*
1460 * After all the gem state is reset, increment the reset
1461 * counter and wake up everyone waiting for the reset to
1462 * complete.
1463 *
1464 * Since unlock operations are a one-sided barrier only,
1465 * we need to insert a barrier here to order any seqno
1466 * updates before
1467 * the counter increment.
1468 */
1469 smp_mb__before_atomic_inc();
1470 atomic_inc(&dev_priv->gpu_error.reset_counter);
1471
1472 kobject_uevent_env(&dev->primary->kdev.kobj,
1473 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001474 } else {
1475 atomic_set(&error->reset_counter, I915_WEDGED);
Ben Gamarif316a422009-09-14 17:48:46 -04001476 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001477
Daniel Vetterf69061b2012-12-06 09:01:42 +01001478 for_each_ring(ring, dev_priv, i)
1479 wake_up_all(&ring->irq_queue);
1480
Ville Syrjälä96a02912013-02-18 19:08:49 +02001481 intel_display_handle_reset(dev);
1482
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001483 wake_up_all(&dev_priv->gpu_error.reset_queue);
Ben Gamarif316a422009-09-14 17:48:46 -04001484 }
Jesse Barnes8a905232009-07-11 16:48:03 -04001485}
1486
Chris Wilson35aed2e2010-05-27 13:18:12 +01001487static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04001488{
1489 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07001490 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04001491 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07001492 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04001493
Chris Wilson35aed2e2010-05-27 13:18:12 +01001494 if (!eir)
1495 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04001496
Joe Perchesa70491c2012-03-18 13:00:11 -07001497 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04001498
Ben Widawskybd9854f2012-08-23 15:18:09 -07001499 i915_get_extra_instdone(dev, instdone);
1500
Jesse Barnes8a905232009-07-11 16:48:03 -04001501 if (IS_G4X(dev)) {
1502 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1503 u32 ipeir = I915_READ(IPEIR_I965);
1504
Joe Perchesa70491c2012-03-18 13:00:11 -07001505 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1506 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07001507 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1508 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07001509 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07001510 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04001511 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001512 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04001513 }
1514 if (eir & GM45_ERROR_PAGE_TABLE) {
1515 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07001516 pr_err("page table error\n");
1517 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04001518 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001519 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04001520 }
1521 }
1522
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001523 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04001524 if (eir & I915_ERROR_PAGE_TABLE) {
1525 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07001526 pr_err("page table error\n");
1527 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04001528 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001529 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04001530 }
1531 }
1532
1533 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07001534 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001535 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07001536 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001537 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04001538 /* pipestat has already been acked */
1539 }
1540 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07001541 pr_err("instruction error\n");
1542 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07001543 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1544 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001545 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04001546 u32 ipeir = I915_READ(IPEIR);
1547
Joe Perchesa70491c2012-03-18 13:00:11 -07001548 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1549 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07001550 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04001551 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001552 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04001553 } else {
1554 u32 ipeir = I915_READ(IPEIR_I965);
1555
Joe Perchesa70491c2012-03-18 13:00:11 -07001556 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1557 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07001558 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07001559 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04001560 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001561 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04001562 }
1563 }
1564
1565 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001566 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04001567 eir = I915_READ(EIR);
1568 if (eir) {
1569 /*
1570 * some errors might have become stuck,
1571 * mask them.
1572 */
1573 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1574 I915_WRITE(EMR, I915_READ(EMR) | eir);
1575 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1576 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01001577}
1578
1579/**
1580 * i915_handle_error - handle an error interrupt
1581 * @dev: drm device
1582 *
1583 * Do some basic checking of register state at error interrupt time and
1584 * dump it to the syslog. Also call i915_capture_error_state() to make
1585 * sure we get a record and make it available in debugfs. Fire a uevent
1586 * so userspace knows something bad happened (should trigger collection
1587 * of a ring dump etc.).
1588 */
Chris Wilson527f9e92010-11-11 01:16:58 +00001589void i915_handle_error(struct drm_device *dev, bool wedged)
Chris Wilson35aed2e2010-05-27 13:18:12 +01001590{
1591 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01001592 struct intel_ring_buffer *ring;
1593 int i;
Chris Wilson35aed2e2010-05-27 13:18:12 +01001594
1595 i915_capture_error_state(dev);
1596 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04001597
Ben Gamariba1234d2009-09-14 17:48:47 -04001598 if (wedged) {
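		/* Flag the reset as in progress before waking anyone, so
		 * waiters see the pending reset. */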
Daniel Vetterf69061b2012-12-06 09:01:42 +01001599 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1600 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04001601
Ben Gamari11ed50e2009-09-14 17:48:45 -04001602 /*
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001603 * Wakeup waiting processes so that the reset work item
1604 * doesn't deadlock trying to grab various locks.
Ben Gamari11ed50e2009-09-14 17:48:45 -04001605 */
Chris Wilsonb4519512012-05-11 14:29:30 +01001606 for_each_ring(ring, dev_priv, i)
1607 wake_up_all(&ring->irq_queue);
Ben Gamari11ed50e2009-09-14 17:48:45 -04001608 }
1609
Daniel Vetter99584db2012-11-14 17:14:04 +01001610 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04001611}
1612
Ville Syrjälä21ad8332013-02-19 15:16:39 +02001613static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001614{
1615 drm_i915_private_t *dev_priv = dev->dev_private;
1616 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1617 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00001618 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001619 struct intel_unpin_work *work;
1620 unsigned long flags;
1621 bool stall_detected;
1622
1623 /* Ignore early vblank irqs */
1624 if (intel_crtc == NULL)
1625 return;
1626
1627 spin_lock_irqsave(&dev->event_lock, flags);
1628 work = intel_crtc->unpin_work;
1629
Chris Wilsone7d841c2012-12-03 11:36:30 +00001630 if (work == NULL ||
1631 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1632 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001633 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1634 spin_unlock_irqrestore(&dev->event_lock, flags);
1635 return;
1636 }
1637
1638 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00001639 obj = work->pending_flip_obj;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001640 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001641 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07001642 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001643 i915_gem_obj_ggtt_offset(obj);
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001644 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001645 int dspaddr = DSPADDR(intel_crtc->plane);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001646 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
Ville Syrjälä01f2c772011-12-20 00:06:49 +02001647 crtc->y * crtc->fb->pitches[0] +
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01001648 crtc->x * crtc->fb->bits_per_pixel/8);
1649 }
1650
1651 spin_unlock_irqrestore(&dev->event_lock, flags);
1652
1653 if (stall_detected) {
1654 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1655 intel_prepare_page_flip(dev, intel_crtc->plane);
1656 }
1657}
1658
Keith Packard42f52ef2008-10-18 19:39:29 -07001659/* Called from drm generic code, passed 'crtc' which
1660 * we use as a pipe index
1661 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001662static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001663{
1664 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07001665 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08001666
Chris Wilson5eddb702010-09-11 13:48:45 +01001667 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08001668 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001669
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001670 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07001671 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08001672 i915_enable_pipestat(dev_priv, pipe,
1673 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Keith Packarde9d21d72008-10-16 11:31:38 -07001674 else
Keith Packard7c463582008-11-04 02:03:27 -08001675 i915_enable_pipestat(dev_priv, pipe,
1676 PIPE_VBLANK_INTERRUPT_ENABLE);
Chris Wilson8692d00e2011-02-05 10:08:21 +00001677
1678 /* maintain vblank delivery even in deep C-states */
1679 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02001680 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001681 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00001682
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001683 return 0;
1684}
1685
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001686static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07001687{
1688 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1689 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03001690 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1691 DE_PIPE_VBLANK_ILK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07001692
1693 if (!i915_pipe_enabled(dev, pipe))
1694 return -EINVAL;
1695
1696 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03001697 ironlake_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001698 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1699
1700 return 0;
1701}
1702
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001703static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1704{
1705 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1706 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001707 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001708
1709 if (!i915_pipe_enabled(dev, pipe))
1710 return -EINVAL;
1711
1712 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001713 imr = I915_READ(VLV_IMR);
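	/* Unmask the VLV_IMR vblank bit for the requested pipe (A for pipe 0,
	 * B otherwise). */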
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001714 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001715 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001716 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001717 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001718 I915_WRITE(VLV_IMR, imr);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001719 i915_enable_pipestat(dev_priv, pipe,
1720 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001721 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1722
1723 return 0;
1724}
1725
Keith Packard42f52ef2008-10-18 19:39:29 -07001726/* Called from drm generic code, passed 'crtc' which
1727 * we use as a pipe index
1728 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001729static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001730{
1731 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07001732 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07001733
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001734 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00001735 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02001736 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00001737
Jesse Barnesf796cf82011-04-07 13:58:17 -07001738 i915_disable_pipestat(dev_priv, pipe,
1739 PIPE_VBLANK_INTERRUPT_ENABLE |
1740 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1742}
1743
Jesse Barnesf71d4af2011-06-28 13:00:41 -07001744static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07001745{
1746 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1747 unsigned long irqflags;
Paulo Zanonib5184212013-07-12 20:00:08 -03001748 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1749 DE_PIPE_VBLANK_ILK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07001750
1751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Paulo Zanonib5184212013-07-12 20:00:08 -03001752 ironlake_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07001753 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1754}
1755
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001756static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1757{
1758 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1759 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001760 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001761
1762 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001763 i915_disable_pipestat(dev_priv, pipe,
1764 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001765 imr = I915_READ(VLV_IMR);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001766 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001767 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07001768 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001769 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001770 I915_WRITE(VLV_IMR, imr);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001771 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1772}
1773
Chris Wilson893eead2010-10-27 14:44:35 +01001774static u32
1775ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08001776{
Chris Wilson893eead2010-10-27 14:44:35 +01001777 return list_entry(ring->request_list.prev,
1778 struct drm_i915_gem_request, list)->seqno;
1779}
1780
Chris Wilson9107e9d2013-06-10 11:20:20 +01001781static bool
1782ring_idle(struct intel_ring_buffer *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01001783{
Chris Wilson9107e9d2013-06-10 11:20:20 +01001784 return (list_empty(&ring->request_list) ||
1785 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04001786}
1787
Chris Wilson6274f212013-06-10 11:20:21 +01001788static struct intel_ring_buffer *
1789semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02001790{
1791 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilson6274f212013-06-10 11:20:21 +01001792 u32 cmd, ipehr, acthd, acthd_min;
Chris Wilsona24a11e2013-03-14 17:52:05 +02001793
1794 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
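	/* Ignore bits 16:17 (presumably the mbox select) when matching the
	 * semaphore-wait command. */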
1795 if ((ipehr & ~(0x3 << 16)) !=
1796 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
Chris Wilson6274f212013-06-10 11:20:21 +01001797 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02001798
1799 /* ACTHD is likely pointing to the dword after the actual command,
1800 * so scan backwards until we find the MBOX.
1801 */
Chris Wilson6274f212013-06-10 11:20:21 +01001802 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
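	/* Scan back at most three dwords for the MBOX command. */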
Chris Wilsona24a11e2013-03-14 17:52:05 +02001803 acthd_min = max((int)acthd - 3 * 4, 0);
1804 do {
1805 cmd = ioread32(ring->virtual_start + acthd);
1806 if (cmd == ipehr)
1807 break;
1808
1809 acthd -= 4;
1810 if (acthd < acthd_min)
Chris Wilson6274f212013-06-10 11:20:21 +01001811 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02001812 } while (1);
1813
Chris Wilson6274f212013-06-10 11:20:21 +01001814 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
1815 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
Chris Wilsona24a11e2013-03-14 17:52:05 +02001816}
1817
Chris Wilson6274f212013-06-10 11:20:21 +01001818static int semaphore_passed(struct intel_ring_buffer *ring)
1819{
1820 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1821 struct intel_ring_buffer *signaller;
1822 u32 seqno, ctl;
1823
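	/* Mark this ring as visited so a circular chain of semaphore waits is
	 * detected rather than recursing forever. */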
1824 ring->hangcheck.deadlock = true;
1825
1826 signaller = semaphore_waits_for(ring, &seqno);
1827 if (signaller == NULL || signaller->hangcheck.deadlock)
1828 return -1;
1829
1830 /* cursory check for an unkickable deadlock */
1831 ctl = I915_READ_CTL(signaller);
1832 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
1833 return -1;
1834
1835 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
1836}
1837
1838static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
1839{
1840 struct intel_ring_buffer *ring;
1841 int i;
1842
1843 for_each_ring(ring, dev_priv, i)
1844 ring->hangcheck.deadlock = false;
1845}
1846
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03001847static enum intel_ring_hangcheck_action
1848ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001849{
1850 struct drm_device *dev = ring->dev;
1851 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001852 u32 tmp;
1853
Chris Wilson6274f212013-06-10 11:20:21 +01001854 if (ring->hangcheck.acthd != acthd)
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001855 return HANGCHECK_ACTIVE;
Chris Wilson6274f212013-06-10 11:20:21 +01001856
Chris Wilson9107e9d2013-06-10 11:20:20 +01001857 if (IS_GEN2(dev))
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001858 return HANGCHECK_HUNG;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001859
1860 /* Is the chip hanging on a WAIT_FOR_EVENT?
1861 * If so we can simply poke the RB_WAIT bit
1862 * and break the hang. This should work on
1863 * all but the second generation chipsets.
1864 */
1865 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001866 if (tmp & RING_WAIT) {
1867 DRM_ERROR("Kicking stuck wait on %s\n",
1868 ring->name);
1869 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001870 return HANGCHECK_KICK;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001871 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02001872
Chris Wilson6274f212013-06-10 11:20:21 +01001873 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
1874 switch (semaphore_passed(ring)) {
1875 default:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001876 return HANGCHECK_HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01001877 case 1:
1878 DRM_ERROR("Kicking stuck semaphore on %s\n",
1879 ring->name);
1880 I915_WRITE_CTL(ring, tmp);
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001881 return HANGCHECK_KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01001882 case 0:
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001883 return HANGCHECK_WAIT;
Chris Wilson6274f212013-06-10 11:20:21 +01001884 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01001885 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03001886
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001887 return HANGCHECK_HUNG;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03001888}
1889
Ben Gamarif65d9422009-09-14 17:48:44 -04001890/**
1891 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001892 * batchbuffers in a long time. We keep track of seqno progress per ring and,
 1893 * if there is no progress, the hangcheck score for that ring is increased.
 1894 * Further, acthd is inspected to see if the ring is stuck. If it is,
 1895 * we kick the ring. If we see no progress on three subsequent calls
 1896 * we assume the chip is wedged and try to fix it by resetting the chip.
Ben Gamarif65d9422009-09-14 17:48:44 -04001897 */
Damien Lespiaua658b5d2013-08-08 22:28:56 +01001898static void i915_hangcheck_elapsed(unsigned long data)
Ben Gamarif65d9422009-09-14 17:48:44 -04001899{
1900 struct drm_device *dev = (struct drm_device *)data;
1901 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01001902 struct intel_ring_buffer *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01001903 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001904 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001905 bool stuck[I915_NUM_RINGS] = { 0 };
1906#define BUSY 1
1907#define KICK 5
1908#define HUNG 20
1909#define FIRE 30
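/* Per-tick hangcheck score increments; a ring whose score exceeds FIRE is
 * reported as hung. */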
Chris Wilson893eead2010-10-27 14:44:35 +01001910
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001911 if (!i915_enable_hangcheck)
1912 return;
1913
Chris Wilsonb4519512012-05-11 14:29:30 +01001914 for_each_ring(ring, dev_priv, i) {
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001915 u32 seqno, acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001916 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01001917
Chris Wilson6274f212013-06-10 11:20:21 +01001918 semaphore_clear_deadlocks(dev_priv);
1919
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001920 seqno = ring->get_seqno(ring, false);
1921 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01001922
Chris Wilson9107e9d2013-06-10 11:20:20 +01001923 if (ring->hangcheck.seqno == seqno) {
1924 if (ring_idle(ring, seqno)) {
1925 if (waitqueue_active(&ring->irq_queue)) {
1926 /* Issue a wake-up to catch stuck h/w. */
1927 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1928 ring->name);
1929 wake_up_all(&ring->irq_queue);
1930 ring->hangcheck.score += HUNG;
1931 } else
1932 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001933 } else {
Chris Wilson6274f212013-06-10 11:20:21 +01001934 /* We always increment the hangcheck score
1935 * if the ring is busy and still processing
1936 * the same request, so that no single request
1937 * can run indefinitely (such as a chain of
1938 * batches). The only time we do not increment
1939				 * the hangcheck score on this ring is if this
1940 * ring is in a legitimate wait for another
1941 * ring. In that case the waiting ring is a
1942 * victim and we want to be sure we catch the
1943 * right culprit. Then every time we do kick
1944 * the ring, add a small increment to the
1945 * score so that we can catch a batch that is
1946 * being repeatedly kicked and so responsible
1947 * for stalling the machine.
1948 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03001949 ring->hangcheck.action = ring_stuck(ring,
1950 acthd);
1951
1952 switch (ring->hangcheck.action) {
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001953 case HANGCHECK_WAIT:
Chris Wilson6274f212013-06-10 11:20:21 +01001954 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001955 case HANGCHECK_ACTIVE:
Jani Nikulaea04cb32013-08-11 12:44:02 +03001956 ring->hangcheck.score += BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01001957 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001958 case HANGCHECK_KICK:
Jani Nikulaea04cb32013-08-11 12:44:02 +03001959 ring->hangcheck.score += KICK;
Chris Wilson6274f212013-06-10 11:20:21 +01001960 break;
Jani Nikulaf2f4d822013-08-11 12:44:01 +03001961 case HANGCHECK_HUNG:
Jani Nikulaea04cb32013-08-11 12:44:02 +03001962 ring->hangcheck.score += HUNG;
Chris Wilson6274f212013-06-10 11:20:21 +01001963 stuck[i] = true;
1964 break;
1965 }
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001966 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01001967 } else {
1968 /* Gradually reduce the count so that we catch DoS
1969 * attempts across multiple batches.
1970 */
1971 if (ring->hangcheck.score > 0)
1972 ring->hangcheck.score--;
Chris Wilsond1e61e72012-04-10 17:00:41 +01001973 }
1974
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001975 ring->hangcheck.seqno = seqno;
1976 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01001977 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01001978 }
Eric Anholtb9201c12010-01-08 14:25:16 -08001979
Mika Kuoppala92cab732013-05-24 17:16:07 +03001980 for_each_ring(ring, dev_priv, i) {
Chris Wilson9107e9d2013-06-10 11:20:20 +01001981 if (ring->hangcheck.score > FIRE) {
Ben Widawskyacd78c12013-06-13 21:33:33 -07001982 DRM_ERROR("%s on %s\n",
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001983 stuck[i] ? "stuck" : "no progress",
Chris Wilsona43adf02013-06-10 11:20:22 +01001984 ring->name);
1985 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03001986 }
1987 }
1988
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001989 if (rings_hung)
1990 return i915_handle_error(dev, true);
Ben Gamarif65d9422009-09-14 17:48:44 -04001991
Mika Kuoppala05407ff2013-05-30 09:04:29 +03001992 if (busy_count)
1993		/* Reset the timer in case the chip hangs without another request
 1994		 * being added */
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03001995 i915_queue_hangcheck(dev);
1996}
1997
1998void i915_queue_hangcheck(struct drm_device *dev)
1999{
2000 struct drm_i915_private *dev_priv = dev->dev_private;
2001 if (!i915_enable_hangcheck)
2002 return;
2003
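	/* Re-arm the hangcheck timer one DRM_I915_HANGCHECK_JIFFIES period
	 * from now. */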
2004 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2005 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002006}
2007
Paulo Zanoni91738a92013-06-05 14:21:51 -03002008static void ibx_irq_preinstall(struct drm_device *dev)
2009{
2010 struct drm_i915_private *dev_priv = dev->dev_private;
2011
2012 if (HAS_PCH_NOP(dev))
2013 return;
2014
2015 /* south display irq */
2016 I915_WRITE(SDEIMR, 0xffffffff);
2017 /*
2018 * SDEIER is also touched by the interrupt handler to work around missed
2019 * PCH interrupts. Hence we can't update it after the interrupt handler
2020 * is enabled - instead we unconditionally enable all PCH interrupt
2021 * sources here, but then only unmask them as needed with SDEIMR.
2022 */
2023 I915_WRITE(SDEIER, 0xffffffff);
2024 POSTING_READ(SDEIER);
2025}
2026
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002027static void gen5_gt_irq_preinstall(struct drm_device *dev)
2028{
2029 struct drm_i915_private *dev_priv = dev->dev_private;
2030
2031 /* and GT */
2032 I915_WRITE(GTIMR, 0xffffffff);
2033 I915_WRITE(GTIER, 0x0);
2034 POSTING_READ(GTIER);
2035
2036 if (INTEL_INFO(dev)->gen >= 6) {
2037 /* and PM */
2038 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2039 I915_WRITE(GEN6_PMIER, 0x0);
2040 POSTING_READ(GEN6_PMIER);
2041 }
2042}
2043
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044/* drm_dma.h hooks
2045*/
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002046static void ironlake_irq_preinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002047{
2048 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2049
Jesse Barnes46979952011-04-07 13:53:55 -07002050 atomic_set(&dev_priv->irq_received, 0);
2051
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002052 I915_WRITE(HWSTAM, 0xeffe);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002053
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002054 I915_WRITE(DEIMR, 0xffffffff);
2055 I915_WRITE(DEIER, 0x0);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002056 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002057
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002058 gen5_gt_irq_preinstall(dev);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002059
Paulo Zanoni91738a92013-06-05 14:21:51 -03002060 ibx_irq_preinstall(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002061}
2062
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002063static void valleyview_irq_preinstall(struct drm_device *dev)
2064{
2065 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2066 int pipe;
2067
2068 atomic_set(&dev_priv->irq_received, 0);
2069
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002070 /* VLV magic */
2071 I915_WRITE(VLV_IMR, 0);
2072 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2073 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2074 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2075
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002076 /* and GT */
2077 I915_WRITE(GTIIR, I915_READ(GTIIR));
2078 I915_WRITE(GTIIR, I915_READ(GTIIR));
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002079
2080 gen5_gt_irq_preinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002081
2082 I915_WRITE(DPINVGTT, 0xff);
2083
2084 I915_WRITE(PORT_HOTPLUG_EN, 0);
2085 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2086 for_each_pipe(pipe)
2087 I915_WRITE(PIPESTAT(pipe), 0xffff);
2088 I915_WRITE(VLV_IIR, 0xffffffff);
2089 I915_WRITE(VLV_IMR, 0xffffffff);
2090 I915_WRITE(VLV_IER, 0x0);
2091 POSTING_READ(VLV_IER);
2092}
2093
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002094static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002095{
2096 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002097 struct drm_mode_config *mode_config = &dev->mode_config;
2098 struct intel_encoder *intel_encoder;
Daniel Vetterfee884e2013-07-04 23:35:21 +02002099 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
Keith Packard7fe0b972011-09-19 13:31:02 -07002100
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002101 if (HAS_PCH_IBX(dev)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002102 hotplug_irqs = SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002103 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002104 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002105 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002106 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02002107 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002108 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002109 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
Daniel Vetterfee884e2013-07-04 23:35:21 +02002110 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002111 }
2112
Daniel Vetterfee884e2013-07-04 23:35:21 +02002113 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002114
2115 /*
2116 * Enable digital hotplug on the PCH, and configure the DP short pulse
2117 * duration to 2ms (which is the minimum in the Display Port spec)
2118 *
2119 * This register is the same on all known PCH chips.
2120 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002121 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2122 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2123 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2124 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2125 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2126 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2127}
2128
Paulo Zanonid46da432013-02-08 17:35:15 -02002129static void ibx_irq_postinstall(struct drm_device *dev)
2130{
2131 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002132 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02002133
Daniel Vetter692a04c2013-05-29 21:43:05 +02002134 if (HAS_PCH_NOP(dev))
2135 return;
2136
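	/* IBX reports transcoder FIFO underruns and poison directly in SDEIIR;
	 * CPT and later fold them behind SDE_ERROR_CPT/SERR_INT. */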
Paulo Zanoni86642812013-04-12 17:57:57 -03002137 if (HAS_PCH_IBX(dev)) {
2138 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
Paulo Zanonide032bf2013-04-12 17:57:58 -03002139 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
Paulo Zanoni86642812013-04-12 17:57:57 -03002140 } else {
2141 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2142
2143 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2144 }
Ben Widawskyab5c6082013-04-05 13:12:41 -07002145
Paulo Zanonid46da432013-02-08 17:35:15 -02002146 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2147 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02002148}
2149
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002150static void gen5_gt_irq_postinstall(struct drm_device *dev)
2151{
2152 struct drm_i915_private *dev_priv = dev->dev_private;
2153 u32 pm_irqs, gt_irqs;
2154
2155 pm_irqs = gt_irqs = 0;
2156
2157 dev_priv->gt_irq_mask = ~0;
2158 if (HAS_L3_GPU_CACHE(dev)) {
2159 /* L3 parity interrupt is always unmasked. */
2160 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2161 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2162 }
2163
2164 gt_irqs |= GT_RENDER_USER_INTERRUPT;
2165 if (IS_GEN5(dev)) {
2166 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2167 ILK_BSD_USER_INTERRUPT;
2168 } else {
2169 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2170 }
2171
2172 I915_WRITE(GTIIR, I915_READ(GTIIR));
2173 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2174 I915_WRITE(GTIER, gt_irqs);
2175 POSTING_READ(GTIER);
2176
2177 if (INTEL_INFO(dev)->gen >= 6) {
2178 pm_irqs |= GEN6_PM_RPS_EVENTS;
2179
2180 if (HAS_VEBOX(dev))
2181 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2182
2183 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2184 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2185 I915_WRITE(GEN6_PMIER, pm_irqs);
2186 POSTING_READ(GEN6_PMIER);
2187 }
2188}
2189
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002190static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002191{
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002192 unsigned long irqflags;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002193 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002194 u32 display_mask, extra_mask;
2195
2196 if (INTEL_INFO(dev)->gen >= 7) {
2197 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2198 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2199 DE_PLANEB_FLIP_DONE_IVB |
2200 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2201 DE_ERR_INT_IVB);
2202 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2203 DE_PIPEA_VBLANK_IVB);
2204
2205 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2206 } else {
2207 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2208 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2209 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2210 DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2211 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2212 }
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002213
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002214 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002215
2216	/* should always be able to generate an irq */
2217 I915_WRITE(DEIIR, I915_READ(DEIIR));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002218 I915_WRITE(DEIMR, dev_priv->irq_mask);
Paulo Zanoni8e76f8d2013-07-12 20:01:56 -03002219 I915_WRITE(DEIER, display_mask | extra_mask);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002220 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002221
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002222 gen5_gt_irq_postinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002223
Paulo Zanonid46da432013-02-08 17:35:15 -02002224 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07002225
Jesse Barnesf97108d2010-01-29 11:27:07 -08002226 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02002227 /* Enable PCU event interrupts
2228 *
2229 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002230 * setup is guaranteed to run in single-threaded context. But we
2231	 * need it to make the assert_spin_locked checks happy. */
2232 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002233 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002234 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002235 }
2236
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002237 return 0;
2238}
2239
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002240static int valleyview_irq_postinstall(struct drm_device *dev)
2241{
2242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002243 u32 enable_mask;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002244 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
Daniel Vetterb79480b2013-06-27 17:52:10 +02002245 unsigned long irqflags;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002246
2247 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002248 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2249 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2250 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002251 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2252
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002253 /*
2254	 * Leave vblank interrupts masked initially. enable/disable will
2255 * toggle them based on usage.
2256 */
2257 dev_priv->irq_mask = (~enable_mask) |
2258 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2259 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002260
Daniel Vetter20afbda2012-12-11 14:05:07 +01002261 I915_WRITE(PORT_HOTPLUG_EN, 0);
2262 POSTING_READ(PORT_HOTPLUG_EN);
2263
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002264 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2265 I915_WRITE(VLV_IER, enable_mask);
2266 I915_WRITE(VLV_IIR, 0xffffffff);
2267 I915_WRITE(PIPESTAT(0), 0xffff);
2268 I915_WRITE(PIPESTAT(1), 0xffff);
2269 POSTING_READ(VLV_IER);
2270
Daniel Vetterb79480b2013-06-27 17:52:10 +02002271 /* Interrupt setup is already guaranteed to be single-threaded, this is
2272 * just to make the assert_spin_locked check happy. */
2273 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002274 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01002275 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002276 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
Daniel Vetterb79480b2013-06-27 17:52:10 +02002277 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002278
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002279 I915_WRITE(VLV_IIR, 0xffffffff);
2280 I915_WRITE(VLV_IIR, 0xffffffff);
2281
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02002282 gen5_gt_irq_postinstall(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002283
2284 /* ack & enable invalid PTE error interrupts */
2285#if 0 /* FIXME: add support to irq handler for checking these bits */
2286 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2287 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2288#endif
2289
2290 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002291
2292 return 0;
2293}
2294
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002295static void valleyview_irq_uninstall(struct drm_device *dev)
2296{
2297 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2298 int pipe;
2299
2300 if (!dev_priv)
2301 return;
2302
Egbert Eichac4c16c2013-04-16 13:36:58 +02002303 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2304
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002305 for_each_pipe(pipe)
2306 I915_WRITE(PIPESTAT(pipe), 0xffff);
2307
2308 I915_WRITE(HWSTAM, 0xffffffff);
2309 I915_WRITE(PORT_HOTPLUG_EN, 0);
2310 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2311 for_each_pipe(pipe)
2312 I915_WRITE(PIPESTAT(pipe), 0xffff);
2313 I915_WRITE(VLV_IIR, 0xffffffff);
2314 I915_WRITE(VLV_IMR, 0xffffffff);
2315 I915_WRITE(VLV_IER, 0x0);
2316 POSTING_READ(VLV_IER);
2317}
2318
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002319static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002320{
2321 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07002322
2323 if (!dev_priv)
2324 return;
2325
Egbert Eichac4c16c2013-04-16 13:36:58 +02002326 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2327
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002328 I915_WRITE(HWSTAM, 0xffffffff);
2329
2330 I915_WRITE(DEIMR, 0xffffffff);
2331 I915_WRITE(DEIER, 0x0);
2332 I915_WRITE(DEIIR, I915_READ(DEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002333 if (IS_GEN7(dev))
2334 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002335
2336 I915_WRITE(GTIMR, 0xffffffff);
2337 I915_WRITE(GTIER, 0x0);
2338 I915_WRITE(GTIIR, I915_READ(GTIIR));
Keith Packard192aac1f2011-09-20 10:12:44 -07002339
Ben Widawskyab5c6082013-04-05 13:12:41 -07002340 if (HAS_PCH_NOP(dev))
2341 return;
2342
Keith Packard192aac1f2011-09-20 10:12:44 -07002343 I915_WRITE(SDEIMR, 0xffffffff);
2344 I915_WRITE(SDEIER, 0x0);
2345 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002346 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2347 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002348}
2349
Chris Wilsonc2798b12012-04-22 21:13:57 +01002350static void i8xx_irq_preinstall(struct drm_device * dev)
2351{
2352 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2353 int pipe;
2354
2355 atomic_set(&dev_priv->irq_received, 0);
2356
2357 for_each_pipe(pipe)
2358 I915_WRITE(PIPESTAT(pipe), 0);
2359 I915_WRITE16(IMR, 0xffff);
2360 I915_WRITE16(IER, 0x0);
2361 POSTING_READ16(IER);
2362}
2363
2364static int i8xx_irq_postinstall(struct drm_device *dev)
2365{
2366 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2367
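	/* Mask all error sources in EMR except page-table and memory-refresh
	 * errors, so only those raise error interrupts. */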
Chris Wilsonc2798b12012-04-22 21:13:57 +01002368 I915_WRITE16(EMR,
2369 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2370
2371 /* Unmask the interrupts that we always want on. */
2372 dev_priv->irq_mask =
2373 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2374 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2375 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2376 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2377 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2378 I915_WRITE16(IMR, dev_priv->irq_mask);
2379
2380 I915_WRITE16(IER,
2381 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2382 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2383 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2384 I915_USER_INTERRUPT);
2385 POSTING_READ16(IER);
2386
2387 return 0;
2388}
2389
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002390/*
2391 * Returns true when a page flip has completed.
2392 */
2393static bool i8xx_handle_vblank(struct drm_device *dev,
2394 int pipe, u16 iir)
2395{
2396 drm_i915_private_t *dev_priv = dev->dev_private;
2397 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2398
2399 if (!drm_handle_vblank(dev, pipe))
2400 return false;
2401
2402 if ((iir & flip_pending) == 0)
2403 return false;
2404
2405 intel_prepare_page_flip(dev, pipe);
2406
2407 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2408 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2409	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
2410 * the flip is completed (no longer pending). Since this doesn't raise
2411 * an interrupt per se, we watch for the change at vblank.
2412 */
2413 if (I915_READ16(ISR) & flip_pending)
2414 return false;
2415
2416 intel_finish_page_flip(dev, pipe);
2417
2418 return true;
2419}
2420
Daniel Vetterff1f5252012-10-02 15:10:55 +02002421static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01002422{
2423 struct drm_device *dev = (struct drm_device *) arg;
2424 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01002425 u16 iir, new_iir;
2426 u32 pipe_stats[2];
2427 unsigned long irqflags;
Chris Wilsonc2798b12012-04-22 21:13:57 +01002428 int pipe;
2429 u16 flip_mask =
2430 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2431 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2432
2433 atomic_inc(&dev_priv->irq_received);
2434
2435 iir = I915_READ16(IIR);
2436 if (iir == 0)
2437 return IRQ_NONE;
2438
2439 while (iir & ~flip_mask) {
2440 /* Can't rely on pipestat interrupt bit in iir as it might
2441 * have been cleared after the pipestat interrupt was received.
2442 * It doesn't set the bit in iir again, but it still produces
2443 * interrupts (for non-MSI).
2444 */
2445 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2446 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2447 i915_handle_error(dev, false);
2448
2449 for_each_pipe(pipe) {
2450 int reg = PIPESTAT(pipe);
2451 pipe_stats[pipe] = I915_READ(reg);
2452
2453 /*
2454 * Clear the PIPE*STAT regs before the IIR
2455 */
2456 if (pipe_stats[pipe] & 0x8000ffff) {
2457 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2458 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2459 pipe_name(pipe));
2460 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilsonc2798b12012-04-22 21:13:57 +01002461 }
2462 }
2463 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2464
2465 I915_WRITE16(IIR, iir & ~flip_mask);
2466 new_iir = I915_READ16(IIR); /* Flush posted writes */
2467
Daniel Vetterd05c6172012-04-26 23:28:09 +02002468 i915_update_dri1_breadcrumb(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01002469
2470 if (iir & I915_USER_INTERRUPT)
2471 notify_ring(dev, &dev_priv->ring[RCS]);
2472
2473 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002474 i8xx_handle_vblank(dev, 0, iir))
2475 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
Chris Wilsonc2798b12012-04-22 21:13:57 +01002476
2477 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002478 i8xx_handle_vblank(dev, 1, iir))
2479 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
Chris Wilsonc2798b12012-04-22 21:13:57 +01002480
2481 iir = new_iir;
2482 }
2483
2484 return IRQ_HANDLED;
2485}
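
/*
 * Sketch of the ordering the handlers above rely on, with a plain
 * variable emulating a write-one-to-clear status register: PIPESTAT is
 * snapshotted and cleared before IIR is acknowledged, because a status
 * bit left set would not re-assert IIR. The example_* names are made up.
 */
#define EXAMPLE_STICKY_STATUS	0x8000ffffu	/* same mask the handlers test */

static u32 example_snapshot_pipestat(u32 *fake_pipestat)
{
	u32 stats = *fake_pipestat;

	/* Writing back the set bits clears them (write-one-to-clear model). */
	if (stats & EXAMPLE_STICKY_STATUS)
		*fake_pipestat = stats & ~EXAMPLE_STICKY_STATUS;

	return stats;	/* caller works from the snapshot; "hardware" is already clean */
}
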
2486
2487static void i8xx_irq_uninstall(struct drm_device * dev)
2488{
2489 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2490 int pipe;
2491
Chris Wilsonc2798b12012-04-22 21:13:57 +01002492 for_each_pipe(pipe) {
2493 /* Clear enable bits; then clear status bits */
2494 I915_WRITE(PIPESTAT(pipe), 0);
2495 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2496 }
2497 I915_WRITE16(IMR, 0xffff);
2498 I915_WRITE16(IER, 0x0);
2499 I915_WRITE16(IIR, I915_READ16(IIR));
2500}
2501
Chris Wilsona266c7d2012-04-24 22:59:44 +01002502static void i915_irq_preinstall(struct drm_device * dev)
2503{
2504 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2505 int pipe;
2506
2507 atomic_set(&dev_priv->irq_received, 0);
2508
2509 if (I915_HAS_HOTPLUG(dev)) {
2510 I915_WRITE(PORT_HOTPLUG_EN, 0);
2511 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2512 }
2513
Chris Wilson00d98eb2012-04-24 22:59:48 +01002514 I915_WRITE16(HWSTAM, 0xeffe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002515 for_each_pipe(pipe)
2516 I915_WRITE(PIPESTAT(pipe), 0);
2517 I915_WRITE(IMR, 0xffffffff);
2518 I915_WRITE(IER, 0x0);
2519 POSTING_READ(IER);
2520}
2521
2522static int i915_irq_postinstall(struct drm_device *dev)
2523{
2524 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01002525 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002526
Chris Wilson38bde182012-04-24 22:59:50 +01002527 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2528
2529 /* Unmask the interrupts that we always want on. */
2530 dev_priv->irq_mask =
2531 ~(I915_ASLE_INTERRUPT |
2532 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2533 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2534 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2535 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2536 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2537
2538 enable_mask =
2539 I915_ASLE_INTERRUPT |
2540 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2541 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2542 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2543 I915_USER_INTERRUPT;
2544
Chris Wilsona266c7d2012-04-24 22:59:44 +01002545 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01002546 I915_WRITE(PORT_HOTPLUG_EN, 0);
2547 POSTING_READ(PORT_HOTPLUG_EN);
2548
Chris Wilsona266c7d2012-04-24 22:59:44 +01002549 /* Enable in IER... */
2550 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2551 /* and unmask in IMR */
2552 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2553 }
2554
Chris Wilsona266c7d2012-04-24 22:59:44 +01002555 I915_WRITE(IMR, dev_priv->irq_mask);
2556 I915_WRITE(IER, enable_mask);
2557 POSTING_READ(IER);
2558
Jani Nikulaf49e38d2013-04-29 13:02:54 +03002559 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002560
2561 return 0;
2562}
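
/*
 * Hedged sketch of how irq_mask and enable_mask relate in
 * i915_irq_postinstall() above: the always-wanted events are unmasked in
 * IMR and enabled in IER, and hotplug is added to both only when the
 * platform supports it. has_hotplug and the *_out parameters are example
 * stand-ins, not driver API.
 */
static void example_build_masks(bool has_hotplug, u32 always_on, u32 hotplug_bit,
				u32 *imr_out, u32 *ier_out)
{
	u32 irq_mask = ~always_on;		/* IMR: set bit = masked off */
	u32 enable_mask = always_on;		/* IER: set bit = may interrupt */

	if (has_hotplug) {
		enable_mask |= hotplug_bit;	/* enable in IER... */
		irq_mask &= ~hotplug_bit;	/* ...and unmask in IMR */
	}

	*imr_out = irq_mask;
	*ier_out = enable_mask;
}
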
2563
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002564/*
2565 * Returns true when a page flip has completed.
2566 */
2567static bool i915_handle_vblank(struct drm_device *dev,
2568 int plane, int pipe, u32 iir)
2569{
2570 drm_i915_private_t *dev_priv = dev->dev_private;
2571 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2572
2573 if (!drm_handle_vblank(dev, pipe))
2574 return false;
2575
2576 if ((iir & flip_pending) == 0)
2577 return false;
2578
2579 intel_prepare_page_flip(dev, plane);
2580
2581 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2582	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
2583 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2584 * the flip is completed (no longer pending). Since this doesn't raise
2585 * an interrupt per se, we watch for the change at vblank.
2586 */
2587 if (I915_READ(ISR) & flip_pending)
2588 return false;
2589
2590 intel_finish_page_flip(dev, pipe);
2591
2592 return true;
2593}
2594
Daniel Vetterff1f5252012-10-02 15:10:55 +02002595static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01002596{
2597 struct drm_device *dev = (struct drm_device *) arg;
2598 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01002599 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01002600 unsigned long irqflags;
Chris Wilson38bde182012-04-24 22:59:50 +01002601 u32 flip_mask =
2602 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2603 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01002604 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002605
2606 atomic_inc(&dev_priv->irq_received);
2607
2608 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01002609 do {
2610 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01002611 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002612
2613 /* Can't rely on pipestat interrupt bit in iir as it might
2614 * have been cleared after the pipestat interrupt was received.
2615 * It doesn't set the bit in iir again, but it still produces
2616 * interrupts (for non-MSI).
2617 */
2618 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2619 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2620 i915_handle_error(dev, false);
2621
2622 for_each_pipe(pipe) {
2623 int reg = PIPESTAT(pipe);
2624 pipe_stats[pipe] = I915_READ(reg);
2625
Chris Wilson38bde182012-04-24 22:59:50 +01002626 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01002627 if (pipe_stats[pipe] & 0x8000ffff) {
2628 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2629 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2630 pipe_name(pipe));
2631 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01002632 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002633 }
2634 }
2635 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2636
2637 if (!irq_received)
2638 break;
2639
Chris Wilsona266c7d2012-04-24 22:59:44 +01002640 /* Consume port. Then clear IIR or we'll miss events */
2641 if ((I915_HAS_HOTPLUG(dev)) &&
2642 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2643 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02002644 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002645
2646 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2647 hotplug_status);
Daniel Vetter91d131d2013-06-27 17:52:14 +02002648
2649 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
2650
Chris Wilsona266c7d2012-04-24 22:59:44 +01002651 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
Chris Wilson38bde182012-04-24 22:59:50 +01002652 POSTING_READ(PORT_HOTPLUG_STAT);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002653 }
2654
Chris Wilson38bde182012-04-24 22:59:50 +01002655 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002656 new_iir = I915_READ(IIR); /* Flush posted writes */
2657
Chris Wilsona266c7d2012-04-24 22:59:44 +01002658 if (iir & I915_USER_INTERRUPT)
2659 notify_ring(dev, &dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002660
Chris Wilsona266c7d2012-04-24 22:59:44 +01002661 for_each_pipe(pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01002662 int plane = pipe;
2663 if (IS_MOBILE(dev))
2664 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02002665
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002666 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2667 i915_handle_vblank(dev, plane, pipe, iir))
2668 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002669
2670 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2671 blc_event = true;
2672 }
2673
Chris Wilsona266c7d2012-04-24 22:59:44 +01002674 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2675 intel_opregion_asle_intr(dev);
2676
2677 /* With MSI, interrupts are only generated when iir
2678 * transitions from zero to nonzero. If another bit got
2679 * set while we were handling the existing iir bits, then
2680 * we would never get another interrupt.
2681 *
2682 * This is fine on non-MSI as well, as if we hit this path
2683 * we avoid exiting the interrupt handler only to generate
2684 * another one.
2685 *
2686 * Note that for MSI this could cause a stray interrupt report
2687 * if an interrupt landed in the time between writing IIR and
2688 * the posting read. This should be rare enough to never
2689 * trigger the 99% of 100,000 interrupts test for disabling
2690 * stray interrupts.
2691 */
Chris Wilson38bde182012-04-24 22:59:50 +01002692 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002693 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01002694 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002695
Daniel Vetterd05c6172012-04-26 23:28:09 +02002696 i915_update_dri1_breadcrumb(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01002697
Chris Wilsona266c7d2012-04-24 22:59:44 +01002698 return ret;
2699}
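
/*
 * Rough model of the MSI note in the comment above: with edge-signalled
 * MSI an interrupt is only raised when IIR goes from zero to nonzero, so
 * bits that appear between our clearing write and the next read must be
 * drained in the same invocation. fake_iir is a hypothetical stand-in for
 * the hardware register, cleared here by masking instead of a real
 * write-one-to-clear cycle.
 */
static void example_msi_drain(volatile u32 *fake_iir, void (*service)(u32))
{
	u32 iir = *fake_iir;

	while (iir) {
		*fake_iir &= ~iir;	/* acknowledge everything seen so far */
		service(iir);		/* then handle it */
		iir = *fake_iir;	/* bits set meanwhile will not re-fire MSI */
	}
}
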
2700
2701static void i915_irq_uninstall(struct drm_device * dev)
2702{
2703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2704 int pipe;
2705
Egbert Eichac4c16c2013-04-16 13:36:58 +02002706 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2707
Chris Wilsona266c7d2012-04-24 22:59:44 +01002708 if (I915_HAS_HOTPLUG(dev)) {
2709 I915_WRITE(PORT_HOTPLUG_EN, 0);
2710 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2711 }
2712
Chris Wilson00d98eb2012-04-24 22:59:48 +01002713 I915_WRITE16(HWSTAM, 0xffff);
Chris Wilson55b39752012-04-24 22:59:49 +01002714 for_each_pipe(pipe) {
2715 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01002716 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01002717 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2718 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01002719 I915_WRITE(IMR, 0xffffffff);
2720 I915_WRITE(IER, 0x0);
2721
Chris Wilsona266c7d2012-04-24 22:59:44 +01002722 I915_WRITE(IIR, I915_READ(IIR));
2723}
2724
2725static void i965_irq_preinstall(struct drm_device * dev)
2726{
2727 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2728 int pipe;
2729
2730 atomic_set(&dev_priv->irq_received, 0);
2731
Chris Wilsonadca4732012-05-11 18:01:31 +01002732 I915_WRITE(PORT_HOTPLUG_EN, 0);
2733 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01002734
2735 I915_WRITE(HWSTAM, 0xeffe);
2736 for_each_pipe(pipe)
2737 I915_WRITE(PIPESTAT(pipe), 0);
2738 I915_WRITE(IMR, 0xffffffff);
2739 I915_WRITE(IER, 0x0);
2740 POSTING_READ(IER);
2741}
2742
2743static int i965_irq_postinstall(struct drm_device *dev)
2744{
2745 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01002746 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002747 u32 error_mask;
Daniel Vetterb79480b2013-06-27 17:52:10 +02002748 unsigned long irqflags;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002749
Chris Wilsona266c7d2012-04-24 22:59:44 +01002750 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01002751 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01002752 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01002753 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2754 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2755 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2756 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2757 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2758
2759 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002760 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2761 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01002762 enable_mask |= I915_USER_INTERRUPT;
2763
2764 if (IS_G4X(dev))
2765 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002766
Daniel Vetterb79480b2013-06-27 17:52:10 +02002767	/* Interrupt setup is already guaranteed to be single-threaded; this is
2768 * just to make the assert_spin_locked check happy. */
2769 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01002770 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
Daniel Vetterb79480b2013-06-27 17:52:10 +02002771 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002772
Chris Wilsona266c7d2012-04-24 22:59:44 +01002773 /*
2774 * Enable some error detection, note the instruction error mask
2775 * bit is reserved, so we leave it masked.
2776 */
2777 if (IS_G4X(dev)) {
2778 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2779 GM45_ERROR_MEM_PRIV |
2780 GM45_ERROR_CP_PRIV |
2781 I915_ERROR_MEMORY_REFRESH);
2782 } else {
2783 error_mask = ~(I915_ERROR_PAGE_TABLE |
2784 I915_ERROR_MEMORY_REFRESH);
2785 }
2786 I915_WRITE(EMR, error_mask);
2787
2788 I915_WRITE(IMR, dev_priv->irq_mask);
2789 I915_WRITE(IER, enable_mask);
2790 POSTING_READ(IER);
2791
Daniel Vetter20afbda2012-12-11 14:05:07 +01002792 I915_WRITE(PORT_HOTPLUG_EN, 0);
2793 POSTING_READ(PORT_HOTPLUG_EN);
2794
Jani Nikulaf49e38d2013-04-29 13:02:54 +03002795 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002796
2797 return 0;
2798}
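
/*
 * Small sketch of the EMR selection above: G4X parts can report a few
 * extra error sources, everything else only page-table and memory-refresh
 * errors, and the register is written inverted because set bits mask
 * errors. The EX_* bit values are invented for the example; only the
 * shape of the selection mirrors the code above.
 */
#define EX_ERR_PAGE_TABLE	(1u << 4)
#define EX_ERR_MEM_REFRESH	(1u << 1)
#define EX_ERR_G4X_EXTRA	(1u << 3)

static u32 example_error_mask(bool is_g4x)
{
	u32 wanted = EX_ERR_PAGE_TABLE | EX_ERR_MEM_REFRESH;

	if (is_g4x)
		wanted |= EX_ERR_G4X_EXTRA;

	return ~wanted;		/* EMR: 1 = masked, so invert the enabled set */
}
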
2799
Egbert Eichbac56d52013-02-25 12:06:51 -05002800static void i915_hpd_irq_setup(struct drm_device *dev)
Daniel Vetter20afbda2012-12-11 14:05:07 +01002801{
2802 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Egbert Eiche5868a32013-02-28 04:17:12 -05002803 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichcd569ae2013-04-16 13:36:57 +02002804 struct intel_encoder *intel_encoder;
Daniel Vetter20afbda2012-12-11 14:05:07 +01002805 u32 hotplug_en;
2806
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02002807 assert_spin_locked(&dev_priv->irq_lock);
2808
Egbert Eichbac56d52013-02-25 12:06:51 -05002809 if (I915_HAS_HOTPLUG(dev)) {
2810 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2811 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2812 /* Note HDMI and DP share hotplug bits */
Egbert Eiche5868a32013-02-28 04:17:12 -05002813 /* enable bits are the same for all generations */
Egbert Eichcd569ae2013-04-16 13:36:57 +02002814 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2815 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2816 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
Egbert Eichbac56d52013-02-25 12:06:51 -05002817		/* Programming the CRT detection parameters tends
2818		 * to generate a spurious hotplug event about three
2819		 * seconds later. So just do it once.
2820		 */
2821 if (IS_G4X(dev))
2822 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
Daniel Vetter85fc95b2013-03-27 15:47:11 +01002823 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
Egbert Eichbac56d52013-02-25 12:06:51 -05002824 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002825
Egbert Eichbac56d52013-02-25 12:06:51 -05002826 /* Ignore TV since it's buggy */
2827 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2828 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01002829}
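
/*
 * Condensed model of the hotplug-enable computation in
 * i915_hpd_irq_setup(): walk the encoders and OR in the enable bit for
 * every pin currently marked enabled. The arrays and sizes here are
 * assumptions made for the example, not driver structures.
 */
static u32 example_build_hotplug_en(const int *encoder_pin, int n_encoders,
				    const bool *pin_enabled, const u32 *pin_to_en_bit)
{
	u32 hotplug_en = 0;
	int i;

	for (i = 0; i < n_encoders; i++)
		if (pin_enabled[encoder_pin[i]])
			hotplug_en |= pin_to_en_bit[encoder_pin[i]];

	return hotplug_en;
}
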
2830
Daniel Vetterff1f5252012-10-02 15:10:55 +02002831static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01002832{
2833 struct drm_device *dev = (struct drm_device *) arg;
2834 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002835 u32 iir, new_iir;
2836 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01002837 unsigned long irqflags;
2838 int irq_received;
2839 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002840 u32 flip_mask =
2841 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2842 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002843
2844 atomic_inc(&dev_priv->irq_received);
2845
2846 iir = I915_READ(IIR);
2847
Chris Wilsona266c7d2012-04-24 22:59:44 +01002848 for (;;) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01002849 bool blc_event = false;
2850
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002851 irq_received = (iir & ~flip_mask) != 0;
Chris Wilsona266c7d2012-04-24 22:59:44 +01002852
2853 /* Can't rely on pipestat interrupt bit in iir as it might
2854 * have been cleared after the pipestat interrupt was received.
2855 * It doesn't set the bit in iir again, but it still produces
2856 * interrupts (for non-MSI).
2857 */
2858 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2859 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2860 i915_handle_error(dev, false);
2861
2862 for_each_pipe(pipe) {
2863 int reg = PIPESTAT(pipe);
2864 pipe_stats[pipe] = I915_READ(reg);
2865
2866 /*
2867 * Clear the PIPE*STAT regs before the IIR
2868 */
2869 if (pipe_stats[pipe] & 0x8000ffff) {
2870 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2871 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2872 pipe_name(pipe));
2873 I915_WRITE(reg, pipe_stats[pipe]);
2874 irq_received = 1;
2875 }
2876 }
2877 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2878
2879 if (!irq_received)
2880 break;
2881
2882 ret = IRQ_HANDLED;
2883
2884 /* Consume port. Then clear IIR or we'll miss events */
Chris Wilsonadca4732012-05-11 18:01:31 +01002885 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01002886 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02002887 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2888 HOTPLUG_INT_STATUS_G4X :
Daniel Vetter4f7fd702013-06-24 21:33:28 +02002889 HOTPLUG_INT_STATUS_I915);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002890
2891 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2892 hotplug_status);
Daniel Vetter91d131d2013-06-27 17:52:14 +02002893
2894 intel_hpd_irq_handler(dev, hotplug_trigger,
2895 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
2896
Chris Wilsona266c7d2012-04-24 22:59:44 +01002897 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2898 I915_READ(PORT_HOTPLUG_STAT);
2899 }
2900
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002901 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002902 new_iir = I915_READ(IIR); /* Flush posted writes */
2903
Chris Wilsona266c7d2012-04-24 22:59:44 +01002904 if (iir & I915_USER_INTERRUPT)
2905 notify_ring(dev, &dev_priv->ring[RCS]);
2906 if (iir & I915_BSD_USER_INTERRUPT)
2907 notify_ring(dev, &dev_priv->ring[VCS]);
2908
Chris Wilsona266c7d2012-04-24 22:59:44 +01002909 for_each_pipe(pipe) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01002910 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002911 i915_handle_vblank(dev, pipe, pipe, iir))
2912 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01002913
2914 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2915 blc_event = true;
2916 }
2917
2918
2919 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2920 intel_opregion_asle_intr(dev);
2921
Daniel Vetter515ac2b2012-12-01 13:53:44 +01002922 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2923 gmbus_irq_handler(dev);
2924
Chris Wilsona266c7d2012-04-24 22:59:44 +01002925 /* With MSI, interrupts are only generated when iir
2926 * transitions from zero to nonzero. If another bit got
2927 * set while we were handling the existing iir bits, then
2928 * we would never get another interrupt.
2929 *
2930 * This is fine on non-MSI as well, as if we hit this path
2931 * we avoid exiting the interrupt handler only to generate
2932 * another one.
2933 *
2934 * Note that for MSI this could cause a stray interrupt report
2935 * if an interrupt landed in the time between writing IIR and
2936 * the posting read. This should be rare enough to never
2937 * trigger the 99% of 100,000 interrupts test for disabling
2938 * stray interrupts.
2939 */
2940 iir = new_iir;
2941 }
2942
Daniel Vetterd05c6172012-04-26 23:28:09 +02002943 i915_update_dri1_breadcrumb(dev);
Chris Wilson2c8ba292012-04-24 22:59:46 +01002944
Chris Wilsona266c7d2012-04-24 22:59:44 +01002945 return ret;
2946}
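
/*
 * The hotplug consume path in i965_irq_handler() masks the raw
 * PORT_HOTPLUG_STAT value with a per-generation set of valid trigger bits
 * before dispatching. Minimal restatement of that selection as a helper
 * (illustrative only; the macros are the same ones used above):
 */
static u32 example_hotplug_trigger(struct drm_device *dev, u32 hotplug_status)
{
	u32 valid = IS_G4X(dev) ? HOTPLUG_INT_STATUS_G4X :
				  HOTPLUG_INT_STATUS_I915;

	return hotplug_status & valid;
}
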
2947
2948static void i965_irq_uninstall(struct drm_device * dev)
2949{
2950 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2951 int pipe;
2952
2953 if (!dev_priv)
2954 return;
2955
Egbert Eichac4c16c2013-04-16 13:36:58 +02002956 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2957
Chris Wilsonadca4732012-05-11 18:01:31 +01002958 I915_WRITE(PORT_HOTPLUG_EN, 0);
2959 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01002960
2961 I915_WRITE(HWSTAM, 0xffffffff);
2962 for_each_pipe(pipe)
2963 I915_WRITE(PIPESTAT(pipe), 0);
2964 I915_WRITE(IMR, 0xffffffff);
2965 I915_WRITE(IER, 0x0);
2966
2967 for_each_pipe(pipe)
2968 I915_WRITE(PIPESTAT(pipe),
2969 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2970 I915_WRITE(IIR, I915_READ(IIR));
2971}
2972
Egbert Eichac4c16c2013-04-16 13:36:58 +02002973static void i915_reenable_hotplug_timer_func(unsigned long data)
2974{
2975 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
2976 struct drm_device *dev = dev_priv->dev;
2977 struct drm_mode_config *mode_config = &dev->mode_config;
2978 unsigned long irqflags;
2979 int i;
2980
2981 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2982 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
2983 struct drm_connector *connector;
2984
2985 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
2986 continue;
2987
2988 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
2989
2990 list_for_each_entry(connector, &mode_config->connector_list, head) {
2991 struct intel_connector *intel_connector = to_intel_connector(connector);
2992
2993 if (intel_connector->encoder->hpd_pin == i) {
2994 if (connector->polled != intel_connector->polled)
2995 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
2996 drm_get_connector_name(connector));
2997 connector->polled = intel_connector->polled;
2998 if (!connector->polled)
2999 connector->polled = DRM_CONNECTOR_POLL_HPD;
3000 }
3001 }
3002 }
3003 if (dev_priv->display.hpd_irq_setup)
3004 dev_priv->display.hpd_irq_setup(dev);
3005 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3006}
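
/*
 * Simplified model of the re-enable pass above: pins parked in the
 * "disabled after an interrupt storm" state are flipped back to enabled
 * and their connectors switched from polling back to hotplug interrupts.
 * The enum, struct and names are illustrative, not the driver's types.
 */
enum example_hpd_state { EX_HPD_ENABLED, EX_HPD_MARK_DISABLED, EX_HPD_DISABLED };

struct example_hpd_pin {
	enum example_hpd_state state;
	bool use_polling;		/* true while the pin is storm-disabled */
};

static void example_reenable_pins(struct example_hpd_pin *pin, int n_pins)
{
	int i;

	for (i = 0; i < n_pins; i++) {
		if (pin[i].state != EX_HPD_DISABLED)
			continue;	/* only storm-disabled pins are touched */

		pin[i].state = EX_HPD_ENABLED;
		pin[i].use_polling = false;	/* back to HPD interrupts */
	}
}
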
3007
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003008void intel_irq_init(struct drm_device *dev)
3009{
Chris Wilson8b2e3262012-04-24 22:59:41 +01003010 struct drm_i915_private *dev_priv = dev->dev_private;
3011
3012 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01003013 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003014 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01003015 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01003016
Daniel Vetter99584db2012-11-14 17:14:04 +01003017 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3018 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01003019 (unsigned long) dev);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003020 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3021 (unsigned long) dev_priv);
Daniel Vetter61bac782012-12-01 21:03:21 +01003022
Tomas Janousek97a19a22012-12-08 13:48:13 +01003023 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01003024
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003025 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3026 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Eugeni Dodonov7d4e1462012-05-09 15:37:09 -03003027 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003028 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3029 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3030 }
3031
Keith Packardc3613de2011-08-12 17:05:54 -07003032 if (drm_core_check_feature(dev, DRIVER_MODESET))
3033 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3034 else
3035 dev->driver->get_vblank_timestamp = NULL;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003036 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3037
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003038 if (IS_VALLEYVIEW(dev)) {
3039 dev->driver->irq_handler = valleyview_irq_handler;
3040 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3041 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3042 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3043 dev->driver->enable_vblank = valleyview_enable_vblank;
3044 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05003045 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003046 } else if (HAS_PCH_SPLIT(dev)) {
3047 dev->driver->irq_handler = ironlake_irq_handler;
3048 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3049 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3050 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3051 dev->driver->enable_vblank = ironlake_enable_vblank;
3052 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003053 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003054 } else {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003055 if (INTEL_INFO(dev)->gen == 2) {
3056 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3057 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3058 dev->driver->irq_handler = i8xx_irq_handler;
3059 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003060 } else if (INTEL_INFO(dev)->gen == 3) {
3061 dev->driver->irq_preinstall = i915_irq_preinstall;
3062 dev->driver->irq_postinstall = i915_irq_postinstall;
3063 dev->driver->irq_uninstall = i915_irq_uninstall;
3064 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003065 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003066 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003067 dev->driver->irq_preinstall = i965_irq_preinstall;
3068 dev->driver->irq_postinstall = i965_irq_postinstall;
3069 dev->driver->irq_uninstall = i965_irq_uninstall;
3070 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05003071 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003072 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003073 dev->driver->enable_vblank = i915_enable_vblank;
3074 dev->driver->disable_vblank = i915_disable_vblank;
3075 }
3076}
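
/*
 * The per-generation selection in intel_irq_init() amounts to choosing a
 * fixed set of callbacks once at init time. A hedged sketch of that idea
 * using the gen2 handlers defined earlier in this file; the struct and
 * picker are examples only, the driver assigns the dev->driver fields
 * directly instead.
 */
struct example_legacy_irq_ops {
	void (*preinstall)(struct drm_device *dev);
	int (*postinstall)(struct drm_device *dev);
	irqreturn_t (*handler)(int irq, void *arg);
	void (*uninstall)(struct drm_device *dev);
};

static const struct example_legacy_irq_ops example_gen2_ops = {
	.preinstall = i8xx_irq_preinstall,
	.postinstall = i8xx_irq_postinstall,
	.handler = i8xx_irq_handler,
	.uninstall = i8xx_irq_uninstall,
};

static const struct example_legacy_irq_ops *example_pick_ops(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen == 2)
		return &example_gen2_ops;

	return NULL;	/* gen3/gen4 tables omitted from this sketch */
}
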
Daniel Vetter20afbda2012-12-11 14:05:07 +01003077
3078void intel_hpd_init(struct drm_device *dev)
3079{
3080 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eich821450c2013-04-16 13:36:55 +02003081 struct drm_mode_config *mode_config = &dev->mode_config;
3082 struct drm_connector *connector;
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003083 unsigned long irqflags;
Egbert Eich821450c2013-04-16 13:36:55 +02003084 int i;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003085
Egbert Eich821450c2013-04-16 13:36:55 +02003086 for (i = 1; i < HPD_NUM_PINS; i++) {
3087 dev_priv->hpd_stats[i].hpd_cnt = 0;
3088 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3089 }
3090 list_for_each_entry(connector, &mode_config->connector_list, head) {
3091 struct intel_connector *intel_connector = to_intel_connector(connector);
3092 connector->polled = intel_connector->polled;
3093 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3094 connector->polled = DRM_CONNECTOR_POLL_HPD;
3095 }
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003096
3097	/* Interrupt setup is already guaranteed to be single-threaded; this is
3098 * just to make the assert_spin_locked checks happy. */
3099 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003100 if (dev_priv->display.hpd_irq_setup)
3101 dev_priv->display.hpd_irq_setup(dev);
Daniel Vetterb5ea2d52013-06-27 17:52:15 +02003102 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003103}
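
/*
 * Sketch of the polling decision in intel_hpd_init(): a connector keeps
 * whatever polling mode it asked for, but if it asked for none and its
 * encoder has a hotplug pin on a platform with hotplug support, it is
 * switched to HPD-driven detection. The parameters are example stand-ins
 * for the connector/encoder fields used above.
 */
static int example_connector_poll_mode(int requested_poll, bool platform_has_hpd,
				       bool encoder_has_hpd_pin)
{
	if (!requested_poll && platform_has_hpd && encoder_has_hpd_pin)
		return DRM_CONNECTOR_POLL_HPD;	/* let the interrupt drive detection */

	return requested_poll;			/* otherwise keep what was requested */
}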