/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

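/*
 * Map each HPD pin (enum hpd_pin) to the matching bit in the platform's
 * hotplug registers: south display trigger bits for IBX and CPT PCHs,
 * hotplug interrupt enable bits for the pre-PCH platforms, and the
 * PORT_HOTPLUG_STAT status bits for g4x, i965 and i915/VLV.
 */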
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i965[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

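/*
 * On IVB/HSW a single DEIMR bit (DE_ERR_INT_IVB) masks the error interrupt
 * for all pipes, and on CPT/PPT a single SDEIMR bit (SDE_ERROR_CPT) covers
 * all transcoders, so the shared interrupt may only be re-enabled once no
 * pipe/transcoder still has FIFO underrun reporting disabled. These two
 * helpers check for that.
 */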
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


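/*
 * PIPESTAT keeps the interrupt enable bits in its high word and the
 * corresponding status bits in its low word; the helpers below therefore
 * preserve only the enable half (0x7fff0000) on read and mirror the enable
 * mask into the status half (mask >> 16) on write to ack any event that is
 * already pending.
 */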
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

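/*
 * Re-run the connector's detect() hook and report whether its status
 * changed; the caller must hold mode_config.mutex, which the WARN_ON
 * below asserts.
 */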
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

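/*
 * ILK DRPS bottom half: under mchdev_lock, step the GPU delay one unit
 * toward ips.max_delay when the up-busy average exceeds its threshold and
 * toward ips.min_delay when the down-busy average drops below its
 * threshold, then program the new value via ironlake_set_drps().
 */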
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

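/*
 * RPS work item: snapshot and clear rps.pm_iir under rps.lock, unmask
 * GEN6_PMIMR, then step the frequency one level up or down depending on
 * which threshold interrupt fired, skipping the change if sysfs adjusted
 * the min/max limits underneath us.
 */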
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

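/*
 * Per-pin HPD storm detection: if more than HPD_STORM_THRESHOLD interrupts
 * arrive on one pin within HPD_STORM_DETECT_PERIOD ms, mark the pin
 * HPD_MARK_DISABLED and return true so the caller can reprogram the
 * hotplug registers; the hotplug work then falls back to polling.
 */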
static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			ret = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return ret;
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_DEFERRED_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_DEFERRED_EVENTS)
		DRM_ERROR("Unexpected PM interrupt\n");
}

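/*
 * Top-level interrupt handler for VLV: loop until VLV_IIR, GTIIR and
 * GEN6_PMIIR all read back zero, servicing GT, pipe-stat, hotplug, GMBUS
 * and PM events on each pass before acking the three IIR registers.
 */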
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

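/*
 * Top-level interrupt handler for IVB/HSW: the master and south (PCH)
 * interrupts are masked up front so that SDEIIR only needs to be written
 * once, then GT, display and PM interrupts are serviced before DEIER and
 * SDEIER are restored.
 */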
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be able
	 * to process them after we restore SDEIER (as soon as we restore it,
	 * we'll get an interrupt if SDEIIR still has something to process due
	 * to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev))
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_ERR_INT_IVB)
			ivb_err_int_handler(dev);

		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_asle_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev))
		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

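/*
 * Top-level interrupt handler for ILK/SNB: unlike the VLV handler above,
 * DEIIR, GTIIR and GEN6_PMIIR are sampled just once per invocation, with
 * the master and south interrupts masked for the duration.
 */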
Daniel Vetterff1f5252012-10-02 15:10:55 +02001291static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001292{
Jesse Barnes46979952011-04-07 13:53:55 -07001293 struct drm_device *dev = (struct drm_device *) arg;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001294 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1295 int ret = IRQ_NONE;
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001296 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
Xiang, Haihao881f47b2010-09-19 14:40:43 +01001297
Jesse Barnes46979952011-04-07 13:53:55 -07001298 atomic_inc(&dev_priv->irq_received);
1299
Zou, Nanhai2d109a82009-11-06 02:13:01 +00001300 /* disable master interrupt before clearing iir */
1301 de_ier = I915_READ(DEIER);
1302 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001303 POSTING_READ(DEIER);
Zou, Nanhai2d109a82009-11-06 02:13:01 +00001304
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001305 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1306 * interrupts will will be stored on its back queue, and then we'll be
1307 * able to process them after we restore SDEIER (as soon as we restore
1308 * it, we'll get an interrupt if SDEIIR still has something to process
1309 * due to its back queue). */
1310 sde_ier = I915_READ(SDEIER);
1311 I915_WRITE(SDEIER, 0);
1312 POSTING_READ(SDEIER);
1313
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001314 de_iir = I915_READ(DEIIR);
1315 gt_iir = I915_READ(GTIIR);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001316 pm_iir = I915_READ(GEN6_PMIIR);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001317
Daniel Vetteracd15b62012-11-30 11:24:50 +01001318 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
Zou Nan haic7c85102010-01-15 10:29:06 +08001319 goto done;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001320
Zou Nan haic7c85102010-01-15 10:29:06 +08001321 ret = IRQ_HANDLED;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001322
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001323 if (IS_GEN5(dev))
1324 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1325 else
1326 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Zou Nan haic7c85102010-01-15 10:29:06 +08001327
Daniel Vetterce99c252012-12-01 13:53:47 +01001328 if (de_iir & DE_AUX_CHANNEL_A)
1329 dp_aux_irq_handler(dev);
1330
Zou Nan haic7c85102010-01-15 10:29:06 +08001331 if (de_iir & DE_GSE)
Jani Nikula81a07802013-04-24 22:18:44 +03001332 intel_opregion_asle_intr(dev);
Zou Nan haic7c85102010-01-15 10:29:06 +08001333
Daniel Vetter74d44442012-10-02 17:54:35 +02001334 if (de_iir & DE_PIPEA_VBLANK)
1335 drm_handle_vblank(dev, 0);
1336
1337 if (de_iir & DE_PIPEB_VBLANK)
1338 drm_handle_vblank(dev, 1);
1339
Paulo Zanonide032bf2013-04-12 17:57:58 -03001340 if (de_iir & DE_POISON)
1341 DRM_ERROR("Poison interrupt\n");
1342
Paulo Zanoni86642812013-04-12 17:57:57 -03001343 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1344 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1345 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1346
1347 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1348 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1349 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1350
Zhenyu Wangf072d2e2010-02-09 09:46:19 +08001351 if (de_iir & DE_PLANEA_FLIP_DONE) {
Jesse Barnes013d5aa2010-01-29 11:18:31 -08001352 intel_prepare_page_flip(dev, 0);
Chris Wilson2bbda382010-09-02 17:59:39 +01001353 intel_finish_page_flip_plane(dev, 0);
Jesse Barnes013d5aa2010-01-29 11:18:31 -08001354 }
1355
Zhenyu Wangf072d2e2010-02-09 09:46:19 +08001356 if (de_iir & DE_PLANEB_FLIP_DONE) {
1357 intel_prepare_page_flip(dev, 1);
Chris Wilson2bbda382010-09-02 17:59:39 +01001358 intel_finish_page_flip_plane(dev, 1);
Jesse Barnes013d5aa2010-01-29 11:18:31 -08001359 }
Li Pengc062df62010-01-23 00:12:58 +08001360
Zou Nan haic7c85102010-01-15 10:29:06 +08001361 /* check event from PCH */
Jesse Barnes776ad802011-01-04 15:09:39 -08001362 if (de_iir & DE_PCH_EVENT) {
Daniel Vetteracd15b62012-11-30 11:24:50 +01001363 u32 pch_iir = I915_READ(SDEIIR);
1364
Adam Jackson23e81d62012-06-06 15:45:44 -04001365 if (HAS_PCH_CPT(dev))
1366 cpt_irq_handler(dev, pch_iir);
1367 else
1368 ibx_irq_handler(dev, pch_iir);
Daniel Vetteracd15b62012-11-30 11:24:50 +01001369
1370		/* should clear PCH hotplug event before clearing the CPU irq */
1371 I915_WRITE(SDEIIR, pch_iir);
Jesse Barnes776ad802011-01-04 15:09:39 -08001372 }
Zou Nan haic7c85102010-01-15 10:29:06 +08001373
Daniel Vetter73edd18f2012-08-08 23:35:37 +02001374 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1375 ironlake_handle_rps_change(dev);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001376
Chris Wilsonfc6826d2012-04-15 11:56:03 +01001377 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
1378 gen6_queue_rps_work(dev_priv, pm_iir);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001379
Zou Nan haic7c85102010-01-15 10:29:06 +08001380 I915_WRITE(GTIIR, gt_iir);
1381 I915_WRITE(DEIIR, de_iir);
Ben Widawsky4912d042011-04-25 11:25:20 -07001382 I915_WRITE(GEN6_PMIIR, pm_iir);
Zou Nan haic7c85102010-01-15 10:29:06 +08001383
1384done:
Zou, Nanhai2d109a82009-11-06 02:13:01 +00001385 I915_WRITE(DEIER, de_ier);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001386 POSTING_READ(DEIER);
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001387 I915_WRITE(SDEIER, sde_ier);
1388 POSTING_READ(SDEIER);
Zou, Nanhai2d109a82009-11-06 02:13:01 +00001389
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001390 return ret;
1391}
1392
Jesse Barnes8a905232009-07-11 16:48:03 -04001393/**
1394 * i915_error_work_func - do process context error handling work
1395 * @work: work struct
1396 *
1397 * Fire an error uevent so userspace can see that a hang or error
1398 * was detected.
1399 */
1400static void i915_error_work_func(struct work_struct *work)
1401{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001402 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1403 work);
1404 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1405 gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04001406 struct drm_device *dev = dev_priv->dev;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001407 struct intel_ring_buffer *ring;
Ben Gamarif316a422009-09-14 17:48:46 -04001408 char *error_event[] = { "ERROR=1", NULL };
1409 char *reset_event[] = { "RESET=1", NULL };
1410 char *reset_done_event[] = { "ERROR=0", NULL };
Daniel Vetterf69061b2012-12-06 09:01:42 +01001411 int i, ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04001412
Ben Gamarif316a422009-09-14 17:48:46 -04001413 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04001414
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001415 /*
1416 * Note that there's only one work item which does gpu resets, so we
1417 * need not worry about concurrent gpu resets potentially incrementing
1418 * error->reset_counter twice. We only need to take care of another
1419 * racing irq/hangcheck declaring the gpu dead for a second time. A
1420 * quick check for that is good enough: schedule_work ensures the
1421 * correct ordering between hang detection and this work item, and since
1422 * the reset in-progress bit is only ever set by code outside of this
1423 * work we don't need to worry about any other races.
1424 */
1425 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01001426 DRM_DEBUG_DRIVER("resetting chip\n");
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001427 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1428 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001429
Daniel Vetterf69061b2012-12-06 09:01:42 +01001430 ret = i915_reset(dev);
1431
1432 if (ret == 0) {
1433 /*
1434 * After all the gem state is reset, increment the reset
1435 * counter and wake up everyone waiting for the reset to
1436 * complete.
1437 *
1438 * Since unlock operations are a one-sided barrier only,
1439 * we need to insert a barrier here to order any seqno
1440 * updates before
1441 * the counter increment.
1442 */
1443 smp_mb__before_atomic_inc();
1444 atomic_inc(&dev_priv->gpu_error.reset_counter);
1445
1446 kobject_uevent_env(&dev->primary->kdev.kobj,
1447 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001448 } else {
1449 atomic_set(&error->reset_counter, I915_WEDGED);
Ben Gamarif316a422009-09-14 17:48:46 -04001450 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001451
Daniel Vetterf69061b2012-12-06 09:01:42 +01001452 for_each_ring(ring, dev_priv, i)
1453 wake_up_all(&ring->irq_queue);
1454
Ville Syrjälä96a02912013-02-18 19:08:49 +02001455 intel_display_handle_reset(dev);
1456
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001457 wake_up_all(&dev_priv->gpu_error.reset_queue);
Ben Gamarif316a422009-09-14 17:48:46 -04001458 }
Jesse Barnes8a905232009-07-11 16:48:03 -04001459}
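/*
 * Userspace typically reacts to the uevents fired above through udev.
 * A hypothetical rule (illustrative only - the script path is made up,
 * but the ERROR/RESET properties match the *_event arrays in
 * i915_error_work_func()):
 *
 *	ACTION=="change", SUBSYSTEM=="drm", ENV{ERROR}=="1", \
 *		RUN+="/usr/local/bin/i915-collect-error-state.sh"
 */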
1460
Daniel Vetter85f9e502012-08-31 21:42:26 +02001461/* NB: the memset below zeroes all I915_NUM_INSTDONE_REG slots, so registers a platform doesn't fill read back as 0 */
1462static void i915_get_extra_instdone(struct drm_device *dev,
1463 uint32_t *instdone)
1464{
1465 struct drm_i915_private *dev_priv = dev->dev_private;
1466 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1467
1468 switch(INTEL_INFO(dev)->gen) {
1469 case 2:
1470 case 3:
1471 instdone[0] = I915_READ(INSTDONE);
1472 break;
1473 case 4:
1474 case 5:
1475 case 6:
1476 instdone[0] = I915_READ(INSTDONE_I965);
1477 instdone[1] = I915_READ(INSTDONE1);
1478 break;
1479 default:
1480 WARN_ONCE(1, "Unsupported platform\n");
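		/* fall through - read the gen7 registers as a best effort */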
1481 case 7:
1482 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1483 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1484 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1485 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1486 break;
1487 }
1488}
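
/*
 * Minimal usage sketch, mirroring i915_report_and_clear_eir() below:
 * callers supply I915_NUM_INSTDONE_REG slots and can safely dump every
 * one of them, because the memset above zeroes the slots a given
 * platform never fills.
 *
 *	uint32_t instdone[I915_NUM_INSTDONE_REG];
 *	int j;
 *
 *	i915_get_extra_instdone(dev, instdone);
 *	for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
 *		pr_err("  INSTDONE_%d: 0x%08x\n", j, instdone[j]);
 */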
1489
Chris Wilson3bd3c932010-08-19 08:19:30 +01001490#ifdef CONFIG_DEBUG_FS
Chris Wilson9df30792010-02-18 10:24:56 +00001491static struct drm_i915_error_object *
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001492i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1493 struct drm_i915_gem_object *src,
1494 const int num_pages)
Chris Wilson9df30792010-02-18 10:24:56 +00001495{
1496 struct drm_i915_error_object *dst;
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001497 int i;
Chris Wilsone56660d2010-08-07 11:01:26 +01001498 u32 reloc_offset;
Chris Wilson9df30792010-02-18 10:24:56 +00001499
Chris Wilson05394f32010-11-08 19:18:58 +00001500 if (src == NULL || src->pages == NULL)
Chris Wilson9df30792010-02-18 10:24:56 +00001501 return NULL;
1502
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001503 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
Chris Wilson9df30792010-02-18 10:24:56 +00001504 if (dst == NULL)
1505 return NULL;
1506
Chris Wilson05394f32010-11-08 19:18:58 +00001507 reloc_offset = src->gtt_offset;
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001508 for (i = 0; i < num_pages; i++) {
Andrew Morton788885a2010-05-11 14:07:05 -07001509 unsigned long flags;
Chris Wilsone56660d2010-08-07 11:01:26 +01001510 void *d;
Andrew Morton788885a2010-05-11 14:07:05 -07001511
Chris Wilsone56660d2010-08-07 11:01:26 +01001512 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
Chris Wilson9df30792010-02-18 10:24:56 +00001513 if (d == NULL)
1514 goto unwind;
Chris Wilsone56660d2010-08-07 11:01:26 +01001515
Andrew Morton788885a2010-05-11 14:07:05 -07001516 local_irq_save(flags);
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001517 if (reloc_offset < dev_priv->gtt.mappable_end &&
Daniel Vetter74898d72012-02-15 23:50:22 +01001518 src->has_global_gtt_mapping) {
Chris Wilson172975aa2011-12-14 13:57:25 +01001519 void __iomem *s;
1520
1521 /* Simply ignore tiling or any overlapping fence.
1522 * It's part of the error state, and this hopefully
1523 * captures what the GPU read.
1524 */
1525
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001526 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
Chris Wilson172975aa2011-12-14 13:57:25 +01001527 reloc_offset);
1528 memcpy_fromio(d, s, PAGE_SIZE);
1529 io_mapping_unmap_atomic(s);
Chris Wilson960e3562012-11-15 11:32:23 +00001530 } else if (src->stolen) {
1531 unsigned long offset;
1532
1533 offset = dev_priv->mm.stolen_base;
1534 offset += src->stolen->start;
1535 offset += i << PAGE_SHIFT;
1536
Daniel Vetter1a240d42012-11-29 22:18:51 +01001537 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
Chris Wilson172975aa2011-12-14 13:57:25 +01001538 } else {
Chris Wilson9da3da62012-06-01 15:20:22 +01001539 struct page *page;
Chris Wilson172975aa2011-12-14 13:57:25 +01001540 void *s;
1541
Chris Wilson9da3da62012-06-01 15:20:22 +01001542 page = i915_gem_object_get_page(src, i);
Chris Wilson172975aa2011-12-14 13:57:25 +01001543
Chris Wilson9da3da62012-06-01 15:20:22 +01001544 drm_clflush_pages(&page, 1);
1545
1546 s = kmap_atomic(page);
Chris Wilson172975aa2011-12-14 13:57:25 +01001547 memcpy(d, s, PAGE_SIZE);
1548 kunmap_atomic(s);
1549
Chris Wilson9da3da62012-06-01 15:20:22 +01001550 drm_clflush_pages(&page, 1);
Chris Wilson172975aa2011-12-14 13:57:25 +01001551 }
Andrew Morton788885a2010-05-11 14:07:05 -07001552 local_irq_restore(flags);
Chris Wilsone56660d2010-08-07 11:01:26 +01001553
Chris Wilson9da3da62012-06-01 15:20:22 +01001554 dst->pages[i] = d;
Chris Wilsone56660d2010-08-07 11:01:26 +01001555
1556 reloc_offset += PAGE_SIZE;
Chris Wilson9df30792010-02-18 10:24:56 +00001557 }
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001558 dst->page_count = num_pages;
Chris Wilson05394f32010-11-08 19:18:58 +00001559 dst->gtt_offset = src->gtt_offset;
Chris Wilson9df30792010-02-18 10:24:56 +00001560
1561 return dst;
1562
1563unwind:
Chris Wilson9da3da62012-06-01 15:20:22 +01001564 while (i--)
1565 kfree(dst->pages[i]);
Chris Wilson9df30792010-02-18 10:24:56 +00001566 kfree(dst);
1567 return NULL;
1568}
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001569#define i915_error_object_create(dev_priv, src) \
1570 i915_error_object_create_sized((dev_priv), (src), \
1571 (src)->base.size>>PAGE_SHIFT)
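
/*
 * The wrapper above snapshots a whole object; single-page captures call
 * the sized variant directly, as i915_gem_record_active_context() does
 * below for the hardware context page:
 *
 *	ering->ctx = i915_error_object_create_sized(dev_priv, obj, 1);
 */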
Chris Wilson9df30792010-02-18 10:24:56 +00001572
1573static void
1574i915_error_object_free(struct drm_i915_error_object *obj)
1575{
1576 int page;
1577
1578 if (obj == NULL)
1579 return;
1580
1581 for (page = 0; page < obj->page_count; page++)
1582 kfree(obj->pages[page]);
1583
1584 kfree(obj);
1585}
1586
Daniel Vetter742cbee2012-04-27 15:17:39 +02001587void
1588i915_error_state_free(struct kref *error_ref)
Chris Wilson9df30792010-02-18 10:24:56 +00001589{
Daniel Vetter742cbee2012-04-27 15:17:39 +02001590 struct drm_i915_error_state *error = container_of(error_ref,
1591 typeof(*error), ref);
Chris Wilsone2f973d2011-01-27 19:15:11 +00001592 int i;
1593
Chris Wilson52d39a22012-02-15 11:25:37 +00001594 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1595 i915_error_object_free(error->ring[i].batchbuffer);
1596 i915_error_object_free(error->ring[i].ringbuffer);
Ben Widawsky7ed73da2013-05-25 14:42:54 -07001597 i915_error_object_free(error->ring[i].ctx);
Chris Wilson52d39a22012-02-15 11:25:37 +00001598 kfree(error->ring[i].requests);
1599 }
Chris Wilsone2f973d2011-01-27 19:15:11 +00001600
Chris Wilson9df30792010-02-18 10:24:56 +00001601 kfree(error->active_bo);
Chris Wilson6ef3d422010-08-04 20:26:07 +01001602 kfree(error->overlay);
Ben Widawsky7ed73da2013-05-25 14:42:54 -07001603 kfree(error->display);
Chris Wilson9df30792010-02-18 10:24:56 +00001604 kfree(error);
1605}
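
/*
 * Lifetime sketch: error states are reference counted. The capture path
 * initialises the count with kref_init(), and every consumer releases
 * its reference through the destructor above, e.g. as
 * i915_destroy_error_state() does below:
 *
 *	kref_put(&error->ref, i915_error_state_free);
 */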
Chris Wilson1b502472012-04-24 15:47:30 +01001606static void capture_bo(struct drm_i915_error_buffer *err,
1607 struct drm_i915_gem_object *obj)
1608{
1609 err->size = obj->base.size;
1610 err->name = obj->base.name;
Chris Wilson0201f1e2012-07-20 12:41:01 +01001611 err->rseqno = obj->last_read_seqno;
1612 err->wseqno = obj->last_write_seqno;
Chris Wilson1b502472012-04-24 15:47:30 +01001613 err->gtt_offset = obj->gtt_offset;
1614 err->read_domains = obj->base.read_domains;
1615 err->write_domain = obj->base.write_domain;
1616 err->fence_reg = obj->fence_reg;
1617 err->pinned = 0;
1618 if (obj->pin_count > 0)
1619 err->pinned = 1;
1620 if (obj->user_pin_count > 0)
1621 err->pinned = -1;
1622 err->tiling = obj->tiling_mode;
1623 err->dirty = obj->dirty;
1624 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1625 err->ring = obj->ring ? obj->ring->id : -1;
1626 err->cache_level = obj->cache_level;
1627}
Chris Wilson9df30792010-02-18 10:24:56 +00001628
Chris Wilson1b502472012-04-24 15:47:30 +01001629static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1630 int count, struct list_head *head)
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001631{
1632 struct drm_i915_gem_object *obj;
1633 int i = 0;
1634
1635 list_for_each_entry(obj, head, mm_list) {
Chris Wilson1b502472012-04-24 15:47:30 +01001636 capture_bo(err++, obj);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001637 if (++i == count)
1638 break;
Chris Wilson1b502472012-04-24 15:47:30 +01001639 }
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001640
Chris Wilson1b502472012-04-24 15:47:30 +01001641 return i;
1642}
1643
1644static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1645 int count, struct list_head *head)
1646{
1647 struct drm_i915_gem_object *obj;
1648 int i = 0;
1649
1650 list_for_each_entry(obj, head, gtt_list) {
1651 if (obj->pin_count == 0)
1652 continue;
1653
1654 capture_bo(err++, obj);
1655 if (++i == count)
1656 break;
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001657 }
1658
1659 return i;
1660}
1661
Chris Wilson748ebc62010-10-24 10:28:47 +01001662static void i915_gem_record_fences(struct drm_device *dev,
1663 struct drm_i915_error_state *error)
1664{
1665 struct drm_i915_private *dev_priv = dev->dev_private;
1666 int i;
1667
1668 /* Fences */
1669 switch (INTEL_INFO(dev)->gen) {
Daniel Vetter775d17b2011-10-09 21:52:01 +02001670 case 7:
Chris Wilson748ebc62010-10-24 10:28:47 +01001671 case 6:
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03001672 for (i = 0; i < dev_priv->num_fence_regs; i++)
Chris Wilson748ebc62010-10-24 10:28:47 +01001673 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1674 break;
1675 case 5:
1676 case 4:
1677 for (i = 0; i < 16; i++)
1678 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1679 break;
1680 case 3:
1681 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1682 for (i = 0; i < 8; i++)
1683 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
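		/* fall through - gen3 also has the eight gen2 fence registers */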
1684 case 2:
1685 for (i = 0; i < 8; i++)
1686 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1687 break;
1688
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08001689 default:
1690 BUG();
Chris Wilson748ebc62010-10-24 10:28:47 +01001691 }
1692}
1693
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001694static struct drm_i915_error_object *
1695i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1696 struct intel_ring_buffer *ring)
1697{
1698 struct drm_i915_gem_object *obj;
1699 u32 seqno;
1700
1701 if (!ring->get_seqno)
1702 return NULL;
1703
Daniel Vetterb45305f2012-12-17 16:21:27 +01001704 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1705 u32 acthd = I915_READ(ACTHD);
1706
1707 if (WARN_ON(ring->id != RCS))
1708 return NULL;
1709
1710 obj = ring->private;
1711 if (acthd >= obj->gtt_offset &&
1712 acthd < obj->gtt_offset + obj->base.size)
1713 return i915_error_object_create(dev_priv, obj);
1714 }
1715
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01001716 seqno = ring->get_seqno(ring, false);
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001717 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1718 if (obj->ring != ring)
1719 continue;
1720
Chris Wilson0201f1e2012-07-20 12:41:01 +01001721 if (i915_seqno_passed(seqno, obj->last_read_seqno))
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001722 continue;
1723
1724 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1725 continue;
1726
1727 /* We need to copy these to an anonymous buffer as the simplest
1728		 * method to avoid it being overwritten by userspace.
1729 */
1730 return i915_error_object_create(dev_priv, obj);
1731 }
1732
1733 return NULL;
1734}
1735
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001736static void i915_record_ring_state(struct drm_device *dev,
1737 struct drm_i915_error_state *error,
1738 struct intel_ring_buffer *ring)
1739{
1740 struct drm_i915_private *dev_priv = dev->dev_private;
1741
Daniel Vetter33f3f512011-12-14 13:57:39 +01001742 if (INTEL_INFO(dev)->gen >= 6) {
Chris Wilson12f55812012-07-05 17:14:01 +01001743 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
Daniel Vetter33f3f512011-12-14 13:57:39 +01001744 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
Daniel Vetter7e3b8732012-02-01 22:26:45 +01001745 error->semaphore_mboxes[ring->id][0]
1746 = I915_READ(RING_SYNC_0(ring->mmio_base));
1747 error->semaphore_mboxes[ring->id][1]
1748 = I915_READ(RING_SYNC_1(ring->mmio_base));
Chris Wilsondf2b23d2012-11-27 17:06:54 +00001749 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1750 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
Daniel Vetter33f3f512011-12-14 13:57:39 +01001751 }
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001752
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001753 if (INTEL_INFO(dev)->gen >= 4) {
Daniel Vetter9d2f41f2012-04-02 21:41:45 +02001754 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001755 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1756 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1757 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001758 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
Ben Widawsky050ee912012-08-22 11:32:15 -07001759 if (ring->id == RCS)
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001760 error->bbaddr = I915_READ64(BB_ADDR);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001761 } else {
Daniel Vetter9d2f41f2012-04-02 21:41:45 +02001762 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001763 error->ipeir[ring->id] = I915_READ(IPEIR);
1764 error->ipehr[ring->id] = I915_READ(IPEHR);
1765 error->instdone[ring->id] = I915_READ(INSTDONE);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001766 }
1767
Ben Widawsky9574b3f2012-04-26 16:03:01 -07001768 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001769 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01001770 error->seqno[ring->id] = ring->get_seqno(ring, false);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001771 error->acthd[ring->id] = intel_ring_get_active_head(ring);
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001772 error->head[ring->id] = I915_READ_HEAD(ring);
1773 error->tail[ring->id] = I915_READ_TAIL(ring);
Chris Wilson0f3b6842013-01-15 12:05:55 +00001774 error->ctl[ring->id] = I915_READ_CTL(ring);
Daniel Vetter7e3b8732012-02-01 22:26:45 +01001775
1776 error->cpu_ring_head[ring->id] = ring->head;
1777 error->cpu_ring_tail[ring->id] = ring->tail;
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001778}
1779
1781static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1782 struct drm_i915_error_state *error,
1783 struct drm_i915_error_ring *ering)
1784{
1785 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1786 struct drm_i915_gem_object *obj;
1787
1788 /* Currently render ring is the only HW context user */
1789 if (ring->id != RCS || !error->ccid)
1790 return;
1791
1792 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
1793 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1794 ering->ctx = i915_error_object_create_sized(dev_priv,
1795 obj, 1);
1796 }
1797 }
1798}
1799
Chris Wilson52d39a22012-02-15 11:25:37 +00001800static void i915_gem_record_rings(struct drm_device *dev,
1801 struct drm_i915_error_state *error)
1802{
1803 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01001804 struct intel_ring_buffer *ring;
Chris Wilson52d39a22012-02-15 11:25:37 +00001805 struct drm_i915_gem_request *request;
1806 int i, count;
1807
Chris Wilsonb4519512012-05-11 14:29:30 +01001808 for_each_ring(ring, dev_priv, i) {
Chris Wilson52d39a22012-02-15 11:25:37 +00001809 i915_record_ring_state(dev, error, ring);
1810
1811 error->ring[i].batchbuffer =
1812 i915_error_first_batchbuffer(dev_priv, ring);
1813
1814 error->ring[i].ringbuffer =
1815 i915_error_object_create(dev_priv, ring->obj);
1816
1818 i915_gem_record_active_context(ring, error, &error->ring[i]);
1819
Chris Wilson52d39a22012-02-15 11:25:37 +00001820 count = 0;
1821 list_for_each_entry(request, &ring->request_list, list)
1822 count++;
1823
1824 error->ring[i].num_requests = count;
1825 error->ring[i].requests =
1826 kmalloc(count*sizeof(struct drm_i915_error_request),
1827 GFP_ATOMIC);
1828 if (error->ring[i].requests == NULL) {
1829 error->ring[i].num_requests = 0;
1830 continue;
1831 }
1832
1833 count = 0;
1834 list_for_each_entry(request, &ring->request_list, list) {
1835 struct drm_i915_error_request *erq;
1836
1837 erq = &error->ring[i].requests[count++];
1838 erq->seqno = request->seqno;
1839 erq->jiffies = request->emitted_jiffies;
Chris Wilsonee4f42b2012-02-15 11:25:38 +00001840 erq->tail = request->tail;
Chris Wilson52d39a22012-02-15 11:25:37 +00001841 }
1842 }
1843}
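
/*
 * Note that every allocation in the capture path above uses GFP_ATOMIC:
 * this code can be reached from the error interrupt itself, where
 * sleeping is not allowed.
 */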
1844
Jesse Barnes8a905232009-07-11 16:48:03 -04001845/**
1846 * i915_capture_error_state - capture an error record for later analysis
1847 * @dev: drm device
1848 *
1849 * Should be called when an error is detected (either a hang or an error
1850 * interrupt) to capture error state from the time of the error. Fills
1851 * out a structure which becomes available in debugfs for user level tools
1852 * to pick up.
1853 */
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001854static void i915_capture_error_state(struct drm_device *dev)
1855{
1856 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001857 struct drm_i915_gem_object *obj;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001858 struct drm_i915_error_state *error;
1859 unsigned long flags;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001860 int i, pipe;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001861
Daniel Vetter99584db2012-11-14 17:14:04 +01001862 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1863 error = dev_priv->gpu_error.first_error;
1864 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
Chris Wilson9df30792010-02-18 10:24:56 +00001865 if (error)
1866 return;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001867
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001868 /* Account for pipe specific data like PIPE*STAT */
Daniel Vetter33f3f512011-12-14 13:57:39 +01001869 error = kzalloc(sizeof(*error), GFP_ATOMIC);
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001870 if (!error) {
Chris Wilson9df30792010-02-18 10:24:56 +00001871 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1872 return;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001873 }
1874
Paulo Zanoni5d83d292013-03-06 20:03:22 -03001875 DRM_INFO("capturing error event; look for more information in "
Ben Widawsky2f86f192013-01-28 15:32:15 -08001876 "/sys/kernel/debug/dri/%d/i915_error_state\n",
Chris Wilsonb6f78332011-02-01 14:15:55 +00001877 dev->primary->index);
Chris Wilson2fa772f2010-10-01 13:23:27 +01001878
Daniel Vetter742cbee2012-04-27 15:17:39 +02001879 kref_init(&error->ref);
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001880 error->eir = I915_READ(EIR);
1881 error->pgtbl_er = I915_READ(PGTBL_ER);
Ben Widawsky211816e2013-02-24 18:10:01 -08001882 if (HAS_HW_CONTEXTS(dev))
1883 error->ccid = I915_READ(CCID);
Ben Widawskybe998e22012-04-26 16:03:00 -07001884
1885 if (HAS_PCH_SPLIT(dev))
1886 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1887 else if (IS_VALLEYVIEW(dev))
1888 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1889 else if (IS_GEN2(dev))
1890 error->ier = I915_READ16(IER);
1891 else
1892 error->ier = I915_READ(IER);
1893
Chris Wilson0f3b6842013-01-15 12:05:55 +00001894 if (INTEL_INFO(dev)->gen >= 6)
1895 error->derrmr = I915_READ(DERRMR);
1896
1897 if (IS_VALLEYVIEW(dev))
1898 error->forcewake = I915_READ(FORCEWAKE_VLV);
1899 else if (INTEL_INFO(dev)->gen >= 7)
1900 error->forcewake = I915_READ(FORCEWAKE_MT);
1901 else if (INTEL_INFO(dev)->gen == 6)
1902 error->forcewake = I915_READ(FORCEWAKE);
1903
Paulo Zanoni4f3308b2013-03-22 14:24:16 -03001904 if (!HAS_PCH_SPLIT(dev))
1905 for_each_pipe(pipe)
1906 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001907
Daniel Vetter33f3f512011-12-14 13:57:39 +01001908 if (INTEL_INFO(dev)->gen >= 6) {
Chris Wilsonf4068392010-10-27 20:36:41 +01001909 error->error = I915_READ(ERROR_GEN6);
Daniel Vetter33f3f512011-12-14 13:57:39 +01001910 error->done_reg = I915_READ(DONE_REG);
1911 }
Chris Wilsonadd354d2010-10-29 19:00:51 +01001912
Ben Widawsky71e172e2012-08-20 16:15:13 -07001913 if (INTEL_INFO(dev)->gen == 7)
1914 error->err_int = I915_READ(GEN7_ERR_INT);
1915
Ben Widawsky050ee912012-08-22 11:32:15 -07001916 i915_get_extra_instdone(dev, error->extra_instdone);
1917
Chris Wilson748ebc62010-10-24 10:28:47 +01001918 i915_gem_record_fences(dev, error);
Chris Wilson52d39a22012-02-15 11:25:37 +00001919 i915_gem_record_rings(dev, error);
Chris Wilson9df30792010-02-18 10:24:56 +00001920
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001921 /* Record buffers on the active and pinned lists. */
Chris Wilson9df30792010-02-18 10:24:56 +00001922 error->active_bo = NULL;
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001923 error->pinned_bo = NULL;
Chris Wilson9df30792010-02-18 10:24:56 +00001924
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001925 i = 0;
1926 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1927 i++;
1928 error->active_bo_count = i;
Chris Wilson6c085a72012-08-20 11:40:46 +02001929 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
Chris Wilson1b502472012-04-24 15:47:30 +01001930 if (obj->pin_count)
1931 i++;
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001932 error->pinned_bo_count = i - error->active_bo_count;
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001933
Chris Wilson8e934db2011-01-24 12:34:00 +00001934 error->active_bo = NULL;
1935 error->pinned_bo = NULL;
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001936 if (i) {
1937 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
Chris Wilson9df30792010-02-18 10:24:56 +00001938 GFP_ATOMIC);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001939 if (error->active_bo)
1940 error->pinned_bo =
1941 error->active_bo + error->active_bo_count;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001942 }
1943
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001944 if (error->active_bo)
1945 error->active_bo_count =
Chris Wilson1b502472012-04-24 15:47:30 +01001946 capture_active_bo(error->active_bo,
1947 error->active_bo_count,
1948 &dev_priv->mm.active_list);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001949
1950 if (error->pinned_bo)
1951 error->pinned_bo_count =
Chris Wilson1b502472012-04-24 15:47:30 +01001952 capture_pinned_bo(error->pinned_bo,
1953 error->pinned_bo_count,
Chris Wilson6c085a72012-08-20 11:40:46 +02001954 &dev_priv->mm.bound_list);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001955
Jesse Barnes8a905232009-07-11 16:48:03 -04001956 do_gettimeofday(&error->time);
1957
Chris Wilson6ef3d422010-08-04 20:26:07 +01001958 error->overlay = intel_overlay_capture_error_state(dev);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +00001959 error->display = intel_display_capture_error_state(dev);
Chris Wilson6ef3d422010-08-04 20:26:07 +01001960
Daniel Vetter99584db2012-11-14 17:14:04 +01001961 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1962 if (dev_priv->gpu_error.first_error == NULL) {
1963 dev_priv->gpu_error.first_error = error;
Chris Wilson9df30792010-02-18 10:24:56 +00001964 error = NULL;
1965 }
Daniel Vetter99584db2012-11-14 17:14:04 +01001966 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
Chris Wilson9df30792010-02-18 10:24:56 +00001967
1968 if (error)
Daniel Vetter742cbee2012-04-27 15:17:39 +02001969 i915_error_state_free(&error->ref);
Chris Wilson9df30792010-02-18 10:24:56 +00001970}
1971
1972void i915_destroy_error_state(struct drm_device *dev)
1973{
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 struct drm_i915_error_state *error;
Ben Widawsky6dc0e812012-01-23 15:30:02 -08001976 unsigned long flags;
Chris Wilson9df30792010-02-18 10:24:56 +00001977
Daniel Vetter99584db2012-11-14 17:14:04 +01001978 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1979 error = dev_priv->gpu_error.first_error;
1980 dev_priv->gpu_error.first_error = NULL;
1981 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
Chris Wilson9df30792010-02-18 10:24:56 +00001982
1983 if (error)
Daniel Vetter742cbee2012-04-27 15:17:39 +02001984 kref_put(&error->ref, i915_error_state_free);
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001985}
Chris Wilson3bd3c932010-08-19 08:19:30 +01001986#else
1987#define i915_capture_error_state(x)
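/* With CONFIG_DEBUG_FS disabled, error state capture compiles away. */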
1988#endif
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001989
Chris Wilson35aed2e2010-05-27 13:18:12 +01001990static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04001991{
1992 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07001993 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04001994 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07001995 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04001996
Chris Wilson35aed2e2010-05-27 13:18:12 +01001997 if (!eir)
1998 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04001999
Joe Perchesa70491c2012-03-18 13:00:11 -07002000 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002001
Ben Widawskybd9854f2012-08-23 15:18:09 -07002002 i915_get_extra_instdone(dev, instdone);
2003
Jesse Barnes8a905232009-07-11 16:48:03 -04002004 if (IS_G4X(dev)) {
2005 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2006 u32 ipeir = I915_READ(IPEIR_I965);
2007
Joe Perchesa70491c2012-03-18 13:00:11 -07002008 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2009 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002010 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2011 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002012 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002013 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002014 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002015 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002016 }
2017 if (eir & GM45_ERROR_PAGE_TABLE) {
2018 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002019 pr_err("page table error\n");
2020 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002021 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002022 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002023 }
2024 }
2025
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002026 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002027 if (eir & I915_ERROR_PAGE_TABLE) {
2028 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002029 pr_err("page table error\n");
2030 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002031 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002032 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002033 }
2034 }
2035
2036 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002037 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002038 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002039 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002040 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002041 /* pipestat has already been acked */
2042 }
2043 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002044 pr_err("instruction error\n");
2045 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002046 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2047 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002048 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002049 u32 ipeir = I915_READ(IPEIR);
2050
Joe Perchesa70491c2012-03-18 13:00:11 -07002051 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2052 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002053 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002054 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002055 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002056 } else {
2057 u32 ipeir = I915_READ(IPEIR_I965);
2058
Joe Perchesa70491c2012-03-18 13:00:11 -07002059 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2060 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002061 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002062 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002063 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002064 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002065 }
2066 }
2067
2068 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002069 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002070 eir = I915_READ(EIR);
2071 if (eir) {
2072 /*
2073 * some errors might have become stuck,
2074 * mask them.
2075 */
2076 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2077 I915_WRITE(EMR, I915_READ(EMR) | eir);
2078 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2079 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002080}
2081
2082/**
2083 * i915_handle_error - handle an error interrupt
2084 * @dev: drm device
2085 *
2086 * Do some basic checking of register state at error interrupt time and
2087 * dump it to the syslog. Also call i915_capture_error_state() to make
2088 * sure we get a record and make it available in debugfs. Fire a uevent
2089 * so userspace knows something bad happened (should trigger collection
2090 * of a ring dump etc.).
2091 */
Chris Wilson527f9e92010-11-11 01:16:58 +00002092void i915_handle_error(struct drm_device *dev, bool wedged)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002093{
2094 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002095 struct intel_ring_buffer *ring;
2096 int i;
Chris Wilson35aed2e2010-05-27 13:18:12 +01002097
2098 i915_capture_error_state(dev);
2099 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002100
Ben Gamariba1234d2009-09-14 17:48:47 -04002101 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01002102 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2103 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002104
Ben Gamari11ed50e2009-09-14 17:48:45 -04002105 /*
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002106 * Wakeup waiting processes so that the reset work item
2107 * doesn't deadlock trying to grab various locks.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002108 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002109 for_each_ring(ring, dev_priv, i)
2110 wake_up_all(&ring->irq_queue);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002111 }
2112
Daniel Vetter99584db2012-11-14 17:14:04 +01002113 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04002114}
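
/*
 * Illustrative call site - this is exactly what i915_hangcheck_hung()
 * does further down once a hang is declared; wedged=true makes sure
 * waiters are kicked before the reset work item runs:
 *
 *	i915_handle_error(dev, true);
 */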
2115
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002116static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002117{
2118 drm_i915_private_t *dev_priv = dev->dev_private;
2119 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00002121 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002122 struct intel_unpin_work *work;
2123 unsigned long flags;
2124 bool stall_detected;
2125
2126 /* Ignore early vblank irqs */
2127 if (intel_crtc == NULL)
2128 return;
2129
2130 spin_lock_irqsave(&dev->event_lock, flags);
2131 work = intel_crtc->unpin_work;
2132
Chris Wilsone7d841c2012-12-03 11:36:30 +00002133 if (work == NULL ||
2134 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2135 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002136 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2137 spin_unlock_irqrestore(&dev->event_lock, flags);
2138 return;
2139 }
2140
2141 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00002142 obj = work->pending_flip_obj;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002143 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002144 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07002145 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2146 obj->gtt_offset;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002147 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002148 int dspaddr = DSPADDR(intel_crtc->plane);
Chris Wilson05394f32010-11-08 19:18:58 +00002149 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
Ville Syrjälä01f2c772011-12-20 00:06:49 +02002150 crtc->y * crtc->fb->pitches[0] +
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002151 crtc->x * crtc->fb->bits_per_pixel/8);
2152 }
2153
2154 spin_unlock_irqrestore(&dev->event_lock, flags);
2155
2156 if (stall_detected) {
2157 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2158 intel_prepare_page_flip(dev, intel_crtc->plane);
2159 }
2160}
2161
Keith Packard42f52ef2008-10-18 19:39:29 -07002162/* Called from drm generic code, passed 'crtc' which
2163 * we use as a pipe index
2164 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002165static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002166{
2167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002168 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002169
Chris Wilson5eddb702010-09-11 13:48:45 +01002170 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002171 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002172
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002173 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002174 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002175 i915_enable_pipestat(dev_priv, pipe,
2176 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Keith Packarde9d21d72008-10-16 11:31:38 -07002177 else
Keith Packard7c463582008-11-04 02:03:27 -08002178 i915_enable_pipestat(dev_priv, pipe,
2179 PIPE_VBLANK_INTERRUPT_ENABLE);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002180
2181 /* maintain vblank delivery even in deep C-states */
2182 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002183 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002184 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002185
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002186 return 0;
2187}
2188
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002189static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002190{
2191 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2192 unsigned long irqflags;
2193
2194 if (!i915_pipe_enabled(dev, pipe))
2195 return -EINVAL;
2196
2197 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2198 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
Akshay Joshi0206e352011-08-16 15:34:10 -04002199 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002200 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2201
2202 return 0;
2203}
2204
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002205static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002206{
2207 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2208 unsigned long irqflags;
2209
2210 if (!i915_pipe_enabled(dev, pipe))
2211 return -EINVAL;
2212
2213 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilsonb615b572012-05-02 09:52:12 +01002214 ironlake_enable_display_irq(dev_priv,
2215 DE_PIPEA_VBLANK_IVB << (5 * pipe));
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002216 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2217
2218 return 0;
2219}
2220
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002221static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2222{
2223 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2224 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002225 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002226
2227 if (!i915_pipe_enabled(dev, pipe))
2228 return -EINVAL;
2229
2230 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002231 imr = I915_READ(VLV_IMR);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002232 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002233 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002234 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002235 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002236 I915_WRITE(VLV_IMR, imr);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002237 i915_enable_pipestat(dev_priv, pipe,
2238 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002239 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2240
2241 return 0;
2242}
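
/*
 * Note the VLV pattern above: vblank delivery is gated twice, once in
 * VLV_IMR and once in PIPESTAT, so the enable path must touch both.
 * The disable path below mirrors the same two steps in reverse order.
 */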
2243
Keith Packard42f52ef2008-10-18 19:39:29 -07002244/* Called from drm generic code, passed 'crtc' which
2245 * we use as a pipe index
2246 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002247static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002248{
2249 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002250 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002251
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002252 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002253 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002254 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00002255
Jesse Barnesf796cf82011-04-07 13:58:17 -07002256 i915_disable_pipestat(dev_priv, pipe,
2257 PIPE_VBLANK_INTERRUPT_ENABLE |
2258 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2259 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2260}
2261
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002262static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002263{
2264 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2265 unsigned long irqflags;
2266
2267 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2268 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
Akshay Joshi0206e352011-08-16 15:34:10 -04002269 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002270 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002271}
2272
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002273static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002274{
2275 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2276 unsigned long irqflags;
2277
2278 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilsonb615b572012-05-02 09:52:12 +01002279 ironlake_disable_display_irq(dev_priv,
2280 DE_PIPEA_VBLANK_IVB << (pipe * 5));
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002281 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2282}
2283
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002284static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2285{
2286 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2287 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002288 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002289
2290 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002291 i915_disable_pipestat(dev_priv, pipe,
2292 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002293 imr = I915_READ(VLV_IMR);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002294 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002295 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002296 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002297 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002298 I915_WRITE(VLV_IMR, imr);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002299 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2300}
2301
Chris Wilson893eead2010-10-27 14:44:35 +01002302static u32
2303ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08002304{
Chris Wilson893eead2010-10-27 14:44:35 +01002305 return list_entry(ring->request_list.prev,
2306 struct drm_i915_gem_request, list)->seqno;
2307}
2308
Mika Kuoppala79ee20d2013-05-13 16:32:09 +03002309static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring,
2310 u32 ring_seqno, bool *err)
Chris Wilson893eead2010-10-27 14:44:35 +01002311{
2312 if (list_empty(&ring->request_list) ||
Mika Kuoppala79ee20d2013-05-13 16:32:09 +03002313 i915_seqno_passed(ring_seqno, ring_last_seqno(ring))) {
Chris Wilson893eead2010-10-27 14:44:35 +01002314 /* Issue a wake-up to catch stuck h/w. */
Ben Widawsky9574b3f2012-04-26 16:03:01 -07002315 if (waitqueue_active(&ring->irq_queue)) {
2316 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2317 ring->name);
Chris Wilson893eead2010-10-27 14:44:35 +01002318 wake_up_all(&ring->irq_queue);
2319 *err = true;
2320 }
2321 return true;
2322 }
2323 return false;
Ben Gamarif65d9422009-09-14 17:48:44 -04002324}
2325
Chris Wilsona24a11e2013-03-14 17:52:05 +02002326static bool semaphore_passed(struct intel_ring_buffer *ring)
2327{
2328 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2329 u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2330 struct intel_ring_buffer *signaller;
2331 u32 cmd, ipehr, acthd_min;
2332
2333 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2334 if ((ipehr & ~(0x3 << 16)) !=
2335 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2336 return false;
2337
2338 /* ACTHD is likely pointing to the dword after the actual command,
2339 * so scan backwards until we find the MBOX.
2340 */
2341 acthd_min = max((int)acthd - 3 * 4, 0);
2342 do {
2343 cmd = ioread32(ring->virtual_start + acthd);
2344 if (cmd == ipehr)
2345 break;
2346
2347 acthd -= 4;
2348 if (acthd < acthd_min)
2349 return false;
2350 } while (1);
2351
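	/* Bits 17:16 of the stalled MI_SEMAPHORE_MBOX command (masked off
	 * in the check above) select which ring's mailbox is being polled;
	 * the modulo-3 arithmetic below maps that selector back onto one of
	 * the three rings. The dword following the command holds the seqno
	 * being waited for, hence the acthd+4 read. (Interpretation inferred
	 * from this code, not from hardware documentation.)
	 */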
2352 signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2353 return i915_seqno_passed(signaller->get_seqno(signaller, false),
2354 ioread32(ring->virtual_start+acthd+4)+1);
2355}
2356
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002357static bool kick_ring(struct intel_ring_buffer *ring)
2358{
2359 struct drm_device *dev = ring->dev;
2360 struct drm_i915_private *dev_priv = dev->dev_private;
2361 u32 tmp = I915_READ_CTL(ring);
2362 if (tmp & RING_WAIT) {
2363 DRM_ERROR("Kicking stuck wait on %s\n",
2364 ring->name);
2365 I915_WRITE_CTL(ring, tmp);
2366 return true;
2367 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002368
2369 if (INTEL_INFO(dev)->gen >= 6 &&
2370 tmp & RING_WAIT_SEMAPHORE &&
2371 semaphore_passed(ring)) {
2372 DRM_ERROR("Kicking stuck semaphore on %s\n",
2373 ring->name);
2374 I915_WRITE_CTL(ring, tmp);
2375 return true;
2376 }
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002377 return false;
2378}
2379
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002380static bool i915_hangcheck_ring_hung(struct intel_ring_buffer *ring)
2381{
2382 if (IS_GEN2(ring->dev))
2383 return false;
2384
2385 /* Is the chip hanging on a WAIT_FOR_EVENT?
2386 * If so we can simply poke the RB_WAIT bit
2387 * and break the hang. This should work on
2388 * all but the second generation chipsets.
2389 */
2390 return !kick_ring(ring);
2391}
2392
Chris Wilsond1e61e72012-04-10 17:00:41 +01002393static bool i915_hangcheck_hung(struct drm_device *dev)
2394{
2395 drm_i915_private_t *dev_priv = dev->dev_private;
2396
Daniel Vetter99584db2012-11-14 17:14:04 +01002397 if (dev_priv->gpu_error.hangcheck_count++ > 1) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002398 bool hung = true;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002399 struct intel_ring_buffer *ring;
2400 int i;
Chris Wilsonb4519512012-05-11 14:29:30 +01002401
Chris Wilsond1e61e72012-04-10 17:00:41 +01002402 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
2403 i915_handle_error(dev, true);
2404
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002405 for_each_ring(ring, dev_priv, i)
2406 hung &= i915_hangcheck_ring_hung(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002407
Chris Wilsonb4519512012-05-11 14:29:30 +01002408 return hung;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002409 }
2410
2411 return false;
2412}
2413
Ben Gamarif65d9422009-09-14 17:48:44 -04002414/**
2415 * This is called when the chip hasn't reported back with completed
2416 * batchbuffers in a long time. The first time this is called we simply record
2417 * the per-ring seqnos. If they haven't advanced by the time the hangcheck
2418 * timer elapses again, we assume the chip is wedged and try to fix it.
2419 */
2420void i915_hangcheck_elapsed(unsigned long data)
2421{
2422 struct drm_device *dev = (struct drm_device *)data;
2423 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002424 struct intel_ring_buffer *ring;
2425 bool err = false, idle;
2426 int i;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002427 u32 seqno[I915_NUM_RINGS];
2428 bool work_done;
Chris Wilson893eead2010-10-27 14:44:35 +01002429
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002430 if (!i915_enable_hangcheck)
2431 return;
2432
Chris Wilsonb4519512012-05-11 14:29:30 +01002433 idle = true;
2434 for_each_ring(ring, dev_priv, i) {
Mika Kuoppala92cab732013-05-24 17:16:07 +03002435 seqno[i] = ring->get_seqno(ring, false);
2436 idle &= i915_hangcheck_ring_idle(ring, seqno[i], &err);
Chris Wilsonb4519512012-05-11 14:29:30 +01002437 }
2438
Chris Wilson893eead2010-10-27 14:44:35 +01002439 /* If all work is done then ACTHD clearly hasn't advanced. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002440 if (idle) {
Chris Wilsond1e61e72012-04-10 17:00:41 +01002441 if (err) {
2442 if (i915_hangcheck_hung(dev))
2443 return;
2444
Chris Wilson893eead2010-10-27 14:44:35 +01002445 goto repeat;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002446 }
2447
Daniel Vetter99584db2012-11-14 17:14:04 +01002448 dev_priv->gpu_error.hangcheck_count = 0;
Chris Wilson893eead2010-10-27 14:44:35 +01002449 return;
2450 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002451
Mika Kuoppala92cab732013-05-24 17:16:07 +03002452 work_done = false;
2453 for_each_ring(ring, dev_priv, i) {
2454 if (ring->hangcheck.seqno != seqno[i]) {
2455 work_done = true;
2456 ring->hangcheck.seqno = seqno[i];
2457 }
2458 }
2459
2460 if (!work_done) {
Chris Wilsond1e61e72012-04-10 17:00:41 +01002461 if (i915_hangcheck_hung(dev))
Chris Wilsoncbb465e2010-06-06 12:16:24 +01002462 return;
Chris Wilsoncbb465e2010-06-06 12:16:24 +01002463 } else {
Daniel Vetter99584db2012-11-14 17:14:04 +01002464 dev_priv->gpu_error.hangcheck_count = 0;
Chris Wilsoncbb465e2010-06-06 12:16:24 +01002465 }
Ben Gamarif65d9422009-09-14 17:48:44 -04002466
Chris Wilson893eead2010-10-27 14:44:35 +01002467repeat:
Ben Gamarif65d9422009-09-14 17:48:44 -04002468	/* Reset timer in case the chip hangs without another request being added */
Daniel Vetter99584db2012-11-14 17:14:04 +01002469 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
Chris Wilsoncecc21f2012-10-05 17:02:56 +01002470 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002471}
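
/*
 * A sketch of the assumed arming path: request submission (outside this
 * file) re-arms the same timer with the same period, so the mod_timer()
 * above only covers the "chip hung with no further requests" case:
 *
 *	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
 *		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 */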

/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
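
/*
 * For illustration only: a rough sketch of how the interrupt handler
 * honours the SDEIER comment above (the real handler lives elsewhere in
 * this file). SDEIER is cleared while SDEIIR is being serviced, so a
 * PCH interrupt arriving meanwhile is queued rather than lost, and
 * re-fires once SDEIER is restored:
 *
 *	sde_ier = I915_READ(SDEIER);
 *	I915_WRITE(SDEIER, 0);
 *	POSTING_READ(SDEIER);
 *	... service DEIIR/GTIIR and PCH events via SDEIIR ...
 *	I915_WRITE(SDEIER, sde_ier);
 *	POSTING_READ(SDEIER);
 */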

static void ivybridge_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
2571
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002572static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002573{
2574 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002575 struct drm_mode_config *mode_config = &dev->mode_config;
2576 struct intel_encoder *intel_encoder;
2577 u32 mask = ~I915_READ(SDEIMR);
2578 u32 hotplug;
Keith Packard7fe0b972011-09-19 13:31:02 -07002579
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002580 if (HAS_PCH_IBX(dev)) {
Egbert Eich995e6b32013-04-16 13:36:56 +02002581 mask &= ~SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002582 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002583 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2584 mask |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002585 } else {
Egbert Eich995e6b32013-04-16 13:36:56 +02002586 mask &= ~SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002587 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002588 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2589 mask |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002590 }
2591
2592 I915_WRITE(SDEIMR, ~mask);
2593
2594 /*
2595 * Enable digital hotplug on the PCH, and configure the DP short pulse
2596 * duration to 2ms (which is the minimum in the Display Port spec)
2597 *
2598 * This register is the same on all known PCH chips.
2599 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002600 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2601 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2602 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2603 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2604 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2605 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2606}
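
/*
 * Worked example (illustrative): on an IBX PCH where only HPD_PORT_B is
 * marked HPD_ENABLED, ibx_hpd_irq_setup() adds just hpd_ibx[HPD_PORT_B]
 * to the mask, so the SDEIMR write ends up unmasking SDE_PORTB_HOTPLUG
 * while the port C/D hotplug bits stay masked until their encoders are
 * enabled as well.
 */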

static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the interrupts that we always want on */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the interrupts that we always want on */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB |
		DE_ERR_INT_IVB;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
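
/*
 * A minimal sketch (not the actual implementation; see
 * valleyview_enable_vblank() elsewhere in this file) of how the
 * enable/disable callbacks mentioned above toggle the vblank bits that
 * were left masked here, e.g. for pipe A:
 *
 *	imr = I915_READ(VLV_IMR);
 *	imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 *	I915_WRITE(VLV_IMR, imr);
 */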

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
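
/*
 * Illustrative timeline for the detection scheme above (a sketch, not
 * driver code):
 *
 *	MI_DISPLAY_FLIP emitted    -> ISR flip-pending bit asserts
 *	vblank, ISR still pending  -> flip not yet done, keep waiting
 *	vblank, ISR deasserted     -> flip completed, finish the flip
 */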

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
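
/*
 * The loop above distills to the following pattern (a sketch only),
 * which the gen2 and gen4 handlers in this file share:
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir);           ack the bits we saw
 *		new_iir = I915_READ(IIR);       flush; pick up new bits
 *		... handle the events recorded in iir ...
 *		iir = new_iir;
 *	} while (iir);
 *
 * Re-reading IIR after the ack ensures that a bit which latched between
 * the ack and the read is processed before returning, which is what
 * keeps MSI's zero-to-nonzero edge requirement satisfied.
 */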

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection; note that the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
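
/*
 * Note on the mask set-up above (explanatory; roughly, ISR feeds IIR
 * through IMR, and IER gates which IIR bits raise a CPU interrupt):
 * the plane flip-pending bits are left unmasked in IMR but removed
 * from IER, so they latch into IIR where i915_handle_vblank() can
 * inspect them, yet never raise an interrupt on their own; the vblank
 * interrupt is what drives their processing.
 */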

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/*
		 * Programming the CRT detection parameters tends to generate
		 * a spurious hotplug event about three seconds later.  So
		 * just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								  HOTPLUG_INT_STATUS_G4X :
								  HOTPLUG_INT_STATUS_I965);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
							    IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
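
/*
 * Sketch of the hotplug-storm lifecycle this timer function completes
 * (the detection half lives in hotplug_irq_storm_detect() and the irq
 * handlers above; details simplified):
 *
 *	1. An irq handler passes the trigger bits to
 *	   hotplug_irq_storm_detect().
 *	2. A pin that fires too often is marked disabled and its
 *	   connectors fall back to polling; hpd_irq_setup() then drops
 *	   the pin from the hardware enable mask.
 *	3. When the reenable timer fires, the function above flips the
 *	   pin back to HPD_ENABLED, restores connector->polled, and
 *	   reprograms the hardware via hpd_irq_setup().
 */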
3488
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003489void intel_irq_init(struct drm_device *dev)
3490{
Chris Wilson8b2e3262012-04-24 22:59:41 +01003491 struct drm_i915_private *dev_priv = dev->dev_private;
3492
3493 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01003494 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003495 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01003496 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01003497
Daniel Vetter99584db2012-11-14 17:14:04 +01003498 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3499 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01003500 (unsigned long) dev);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003501 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3502 (unsigned long) dev_priv);
Daniel Vetter61bac782012-12-01 21:03:21 +01003503
Tomas Janousek97a19a22012-12-08 13:48:13 +01003504 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01003505
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003506 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3507 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Eugeni Dodonov7d4e1462012-05-09 15:37:09 -03003508 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003509 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3510 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3511 }
3512
Keith Packardc3613de2011-08-12 17:05:54 -07003513 if (drm_core_check_feature(dev, DRIVER_MODESET))
3514 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3515 else
3516 dev->driver->get_vblank_timestamp = NULL;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003517 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3518
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003519 if (IS_VALLEYVIEW(dev)) {
3520 dev->driver->irq_handler = valleyview_irq_handler;
3521 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3522 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3523 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3524 dev->driver->enable_vblank = valleyview_enable_vblank;
3525 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05003526 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetter4a06e202012-12-01 13:53:40 +01003527 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
Ben Widawsky7d991632013-05-28 19:22:25 -07003528 /* Share uninstall handlers with ILK/SNB */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003529 dev->driver->irq_handler = ivybridge_irq_handler;
Ben Widawsky7d991632013-05-28 19:22:25 -07003530 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003531 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3532 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3533 dev->driver->enable_vblank = ivybridge_enable_vblank;
3534 dev->driver->disable_vblank = ivybridge_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003535 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003536 } else if (HAS_PCH_SPLIT(dev)) {
3537 dev->driver->irq_handler = ironlake_irq_handler;
3538 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3539 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3540 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3541 dev->driver->enable_vblank = ironlake_enable_vblank;
3542 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003543 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003544 } else {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003545 if (INTEL_INFO(dev)->gen == 2) {
3546 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3547 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3548 dev->driver->irq_handler = i8xx_irq_handler;
3549 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003550 } else if (INTEL_INFO(dev)->gen == 3) {
3551 dev->driver->irq_preinstall = i915_irq_preinstall;
3552 dev->driver->irq_postinstall = i915_irq_postinstall;
3553 dev->driver->irq_uninstall = i915_irq_uninstall;
3554 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003555 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003556 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003557 dev->driver->irq_preinstall = i965_irq_preinstall;
3558 dev->driver->irq_postinstall = i965_irq_postinstall;
3559 dev->driver->irq_uninstall = i965_irq_uninstall;
3560 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05003561 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003562 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003563 dev->driver->enable_vblank = i915_enable_vblank;
3564 dev->driver->disable_vblank = i915_disable_vblank;
3565 }
3566}
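
/*
 * How these hooks get used (a simplified sketch of the drm core's
 * drm_irq_install() sequence; not code from this file):
 *
 *	dev->driver->irq_preinstall(dev);	quiesce the hardware
 *	request_irq(irq, dev->driver->irq_handler, ...);
 *	dev->driver->irq_postinstall(dev);	unmask + enable sources
 *
 * which is why every preinstall routine above masks everything off and
 * the postinstall routines do the actual enabling.
 */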

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}