/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

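/*
 * The tables above map an HPD pin number to the matching hotplug bit in
 * the per-platform enable/status register, so a handler can walk a
 * register snapshot and recover which pins fired, roughly:
 *
 *	for (i = 1; i < HPD_NUM_PINS; i++)
 *		if (hotplug_trigger & hpd[i])
 *			handle_pin(i);
 *
 * (illustrative only; handle_pin() is a hypothetical stand-in for the
 * per-pin bookkeeping that hotplug_irq_storm_detect() below performs)
 */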
static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

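/*
 * DEIMR is a mask register, so a set bit disables the corresponding
 * display interrupt. The two helpers above keep a shadow copy in
 * dev_priv->irq_mask and only touch the hardware when the cached state
 * actually changes; the POSTING_READ() flushes the posted MMIO write so
 * the new mask takes effect before the caller proceeds.
 */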
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

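/*
 * Both predicates above exist because IVB/HSW expose a single
 * DE_ERR_INT_IVB enable bit and CPT/PPT a single SDE_ERROR_CPT bit
 * shared by every pipe/transcoder. The error interrupt may therefore
 * only be unmasked once no pipe has underrun reporting disabled, which
 * is exactly the all-pipes scan performed here.
 */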
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function enables or disables CPU FIFO underrun reporting for a
 * specific pipe. Note that on some gens (e.g. IVB, HSW), disabling FIFO
 * underrun reporting for one pipe may also disable all the other CPU error
 * interrupts for the other pipes, because there is just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

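/*
 * Since the function returns the previous reporting state, a caller can
 * silence underruns around a transition known to glitch and afterwards
 * restore whatever was in effect before, along the lines of:
 *
 *	bool was_enabled;
 *
 *	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	reconfigure_pipe(dev, pipe);	(hypothetical helper)
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
 */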
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function enables or disables PCH FIFO underrun reporting for a
 * specific PCH transcoder. Note that on some PCHs (e.g. CPT/PPT), disabling
 * FIFO underrun reporting for one transcoder may also disable all the other
 * PCH error interrupts for the other transcoders, because there is just one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

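/*
 * The PIPESTAT registers pack interrupt enable bits in the high half
 * and the matching status bits sixteen positions lower, which is why
 * the helpers above mask with 0x7fff0000 to keep only the enable bits
 * and why "mask | (mask >> 16)" both enables an interrupt and acks its
 * pending status in a single write. i915_enable_asle_pipestat() below,
 * for example, passes PIPE_LEGACY_BLC_EVENT_ENABLE and thereby also
 * clears any stale backlight event.
 */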
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

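/*
 * The do/while above is the classic split-counter read: if the high
 * half changes between the two reads, a frame rollover raced with us
 * and the low half may belong to either frame, so the loop retries
 * until two reads of the high register agree.
 */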
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

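/*
 * The work function above runs in two passes under different locks:
 * under irq_lock it snapshots and clears hpd_event_bits and demotes
 * storm-flagged connectors from interrupt-driven detection to polling;
 * after dropping the spinlock (detect callbacks may sleep) it walks the
 * connectors again, runs ->hot_plug() and ->detect(), and sends a
 * single hotplug uevent at the end only if some status really changed.
 */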
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

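/*
 * Note the inverted scale implied by the clamping above: on Ironlake a
 * smaller IPS "delay" means a faster state, so busy_up pressure
 * decrements the delay and ips.max_delay is the numerically smallest
 * value, hence the seemingly backwards "new_delay < max_delay" and
 * "new_delay > min_delay" comparisons.
 */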
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			ret = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return ret;
}

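/*
 * Worked example of the storm logic above: with
 * HPD_STORM_DETECT_PERIOD == 1000 and HPD_STORM_THRESHOLD == 5, an
 * interrupt arriving within one second of the recorded window start
 * bumps hpd_cnt, while one arriving later restarts the window with
 * hpd_cnt = 0. Once hpd_cnt exceeds the threshold inside one window the
 * pin is marked HPD_MARK_DISABLED, its event bit is dropped, and the
 * function returns true so the caller reprograms the hotplug registers
 * and lets the hotplug work fall back to polling for that connector.
 */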
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

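/*
 * On Haswell the PM IIR carries both RPS events and VEBOX ring events,
 * so the handler above splits the word: the RPS bits take the same
 * mask-and-defer path as gen6_queue_rps_work(), while the VEBOX user
 * and error bits are serviced inline since they only wake a waitqueue
 * or flag a hang.
 */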
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_ERR_INT_IVB)
			ivb_err_int_handler(dev);

		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_asle_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

Paulo Zanoni44498ae2013-02-22 17:05:28 -03001325 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1326 * interrupts will be stored on its back queue, and then we'll be
1327 * able to process them after we restore SDEIER (as soon as we restore
1328 * it, we'll get an interrupt if SDEIIR still has something to process
1329 * due to its back queue). */
1330 sde_ier = I915_READ(SDEIER);
1331 I915_WRITE(SDEIER, 0);
1332 POSTING_READ(SDEIER);
1333
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001334 de_iir = I915_READ(DEIIR);
1335 gt_iir = I915_READ(GTIIR);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001336 pm_iir = I915_READ(GEN6_PMIIR);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001337
Daniel Vetteracd15b62012-11-30 11:24:50 +01001338 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
Zou Nan haic7c85102010-01-15 10:29:06 +08001339 goto done;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001340
Zou Nan haic7c85102010-01-15 10:29:06 +08001341 ret = IRQ_HANDLED;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001342
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001343 if (IS_GEN5(dev))
1344 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1345 else
1346 snb_gt_irq_handler(dev, dev_priv, gt_iir);
Zou Nan haic7c85102010-01-15 10:29:06 +08001347
Daniel Vetterce99c252012-12-01 13:53:47 +01001348 if (de_iir & DE_AUX_CHANNEL_A)
1349 dp_aux_irq_handler(dev);
1350
Zou Nan haic7c85102010-01-15 10:29:06 +08001351 if (de_iir & DE_GSE)
Jani Nikula81a07802013-04-24 22:18:44 +03001352 intel_opregion_asle_intr(dev);
Zou Nan haic7c85102010-01-15 10:29:06 +08001353
Daniel Vetter74d44442012-10-02 17:54:35 +02001354 if (de_iir & DE_PIPEA_VBLANK)
1355 drm_handle_vblank(dev, 0);
1356
1357 if (de_iir & DE_PIPEB_VBLANK)
1358 drm_handle_vblank(dev, 1);
1359
Paulo Zanonide032bf2013-04-12 17:57:58 -03001360 if (de_iir & DE_POISON)
1361 DRM_ERROR("Poison interrupt\n");
1362
Paulo Zanoni86642812013-04-12 17:57:57 -03001363 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1364 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1365 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1366
1367 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1368 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1369 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1370
Zhenyu Wangf072d2e2010-02-09 09:46:19 +08001371 if (de_iir & DE_PLANEA_FLIP_DONE) {
Jesse Barnes013d5aa2010-01-29 11:18:31 -08001372 intel_prepare_page_flip(dev, 0);
Chris Wilson2bbda382010-09-02 17:59:39 +01001373 intel_finish_page_flip_plane(dev, 0);
Jesse Barnes013d5aa2010-01-29 11:18:31 -08001374 }
1375
Zhenyu Wangf072d2e2010-02-09 09:46:19 +08001376 if (de_iir & DE_PLANEB_FLIP_DONE) {
1377 intel_prepare_page_flip(dev, 1);
Chris Wilson2bbda382010-09-02 17:59:39 +01001378 intel_finish_page_flip_plane(dev, 1);
Jesse Barnes013d5aa2010-01-29 11:18:31 -08001379 }
Li Pengc062df62010-01-23 00:12:58 +08001380
Zou Nan haic7c85102010-01-15 10:29:06 +08001381 /* check event from PCH */
Jesse Barnes776ad802011-01-04 15:09:39 -08001382 if (de_iir & DE_PCH_EVENT) {
Daniel Vetteracd15b62012-11-30 11:24:50 +01001383 u32 pch_iir = I915_READ(SDEIIR);
1384
Adam Jackson23e81d62012-06-06 15:45:44 -04001385 if (HAS_PCH_CPT(dev))
1386 cpt_irq_handler(dev, pch_iir);
1387 else
1388 ibx_irq_handler(dev, pch_iir);
Daniel Vetteracd15b62012-11-30 11:24:50 +01001389
1390 /* should clear PCH hotplug events before clearing the CPU irq */
1391 I915_WRITE(SDEIIR, pch_iir);
Jesse Barnes776ad802011-01-04 15:09:39 -08001392 }
Zou Nan haic7c85102010-01-15 10:29:06 +08001393
Daniel Vetter73edd18f2012-08-08 23:35:37 +02001394 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1395 ironlake_handle_rps_change(dev);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001396
Ben Widawsky48484052013-05-28 19:22:27 -07001397 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
Chris Wilsonfc6826d2012-04-15 11:56:03 +01001398 gen6_queue_rps_work(dev_priv, pm_iir);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001399
Zou Nan haic7c85102010-01-15 10:29:06 +08001400 I915_WRITE(GTIIR, gt_iir);
1401 I915_WRITE(DEIIR, de_iir);
Ben Widawsky4912d042011-04-25 11:25:20 -07001402 I915_WRITE(GEN6_PMIIR, pm_iir);
Zou Nan haic7c85102010-01-15 10:29:06 +08001403
1404done:
Zou, Nanhai2d109a82009-11-06 02:13:01 +00001405 I915_WRITE(DEIER, de_ier);
Chris Wilson3143a2b2010-11-16 15:55:10 +00001406 POSTING_READ(DEIER);
Paulo Zanoni44498ae2013-02-22 17:05:28 -03001407 I915_WRITE(SDEIER, sde_ier);
1408 POSTING_READ(SDEIER);
Zou, Nanhai2d109a82009-11-06 02:13:01 +00001409
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001410 return ret;
1411}
1412
Jesse Barnes8a905232009-07-11 16:48:03 -04001413/**
1414 * i915_error_work_func - do process context error handling work
1415 * @work: work struct
1416 *
1417 * Fire an error uevent so userspace can see that a hang or error
1418 * was detected.
1419 */
1420static void i915_error_work_func(struct work_struct *work)
1421{
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001422 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1423 work);
1424 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1425 gpu_error);
Jesse Barnes8a905232009-07-11 16:48:03 -04001426 struct drm_device *dev = dev_priv->dev;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001427 struct intel_ring_buffer *ring;
Ben Gamarif316a422009-09-14 17:48:46 -04001428 char *error_event[] = { "ERROR=1", NULL };
1429 char *reset_event[] = { "RESET=1", NULL };
1430 char *reset_done_event[] = { "ERROR=0", NULL };
Daniel Vetterf69061b2012-12-06 09:01:42 +01001431 int i, ret;
Jesse Barnes8a905232009-07-11 16:48:03 -04001432
Ben Gamarif316a422009-09-14 17:48:46 -04001433 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04001434
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001435 /*
1436 * Note that there's only one work item which does gpu resets, so we
1437 * need not worry about concurrent gpu resets potentially incrementing
1438 * error->reset_counter twice. We only need to take care of another
1439 * racing irq/hangcheck declaring the gpu dead for a second time. A
1440 * quick check for that is good enough: schedule_work ensures the
1441 * correct ordering between hang detection and this work item, and since
1442 * the reset in-progress bit is only ever set by code outside of this
1443 * work we don't need to worry about any other races.
1444 */
1445 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
Chris Wilsonf803aa52010-09-19 12:38:26 +01001446 DRM_DEBUG_DRIVER("resetting chip\n");
Daniel Vetter7db0ba22012-12-06 16:23:37 +01001447 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1448 reset_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001449
Daniel Vetterf69061b2012-12-06 09:01:42 +01001450 ret = i915_reset(dev);
1451
1452 if (ret == 0) {
1453 /*
1454 * After all the gem state is reset, increment the reset
1455 * counter and wake up everyone waiting for the reset to
1456 * complete.
1457 *
1458 * Since unlock operations are a one-sided barrier only,
1459 * we need to insert a barrier here to order any seqno
1460 * updates before the counter increment.
1462 */
1463 smp_mb__before_atomic_inc();
1464 atomic_inc(&dev_priv->gpu_error.reset_counter);
1465
1466 kobject_uevent_env(&dev->primary->kdev.kobj,
1467 KOBJ_CHANGE, reset_done_event);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001468 } else {
1469 atomic_set(&error->reset_counter, I915_WEDGED);
Ben Gamarif316a422009-09-14 17:48:46 -04001470 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001471
Daniel Vetterf69061b2012-12-06 09:01:42 +01001472 for_each_ring(ring, dev_priv, i)
1473 wake_up_all(&ring->irq_queue);
1474
Ville Syrjälä96a02912013-02-18 19:08:49 +02001475 intel_display_handle_reset(dev);
1476
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001477 wake_up_all(&dev_priv->gpu_error.reset_queue);
Ben Gamarif316a422009-09-14 17:48:46 -04001478 }
Jesse Barnes8a905232009-07-11 16:48:03 -04001479}
1480
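/*
 * Illustrative sketch (hypothetical helper, not upstream code): because
 * the work item above only increments reset_counter once a reset has
 * fully completed, a sleeper merely needs to snapshot the counter before
 * waiting and compare afterwards; any difference proves a reset (or a
 * terminal wedge) happened in between.
 */
static bool __always_unused
example_reset_happened(struct i915_gpu_error *error, unsigned snapshot)
{
	return atomic_read(&error->reset_counter) != snapshot;
}
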
Daniel Vetter85f9e502012-08-31 21:42:26 +02001481/* NB: please notice the memset */
1482static void i915_get_extra_instdone(struct drm_device *dev,
1483 uint32_t *instdone)
1484{
1485 struct drm_i915_private *dev_priv = dev->dev_private;
1486 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1487
1488 switch(INTEL_INFO(dev)->gen) {
1489 case 2:
1490 case 3:
1491 instdone[0] = I915_READ(INSTDONE);
1492 break;
1493 case 4:
1494 case 5:
1495 case 6:
1496 instdone[0] = I915_READ(INSTDONE_I965);
1497 instdone[1] = I915_READ(INSTDONE1);
1498 break;
1499 default:
1500 WARN_ONCE(1, "Unsupported platform\n");
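		/* missing break is deliberate: treat unknown gens like gen7 */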
1501 case 7:
1502 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1503 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1504 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1505 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1506 break;
1507 }
1508}
1509
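/*
 * Illustrative usage sketch (hypothetical helper): callers pass an
 * I915_NUM_INSTDONE_REG-sized array and can dump every slot, since the
 * memset above zeroes whatever entries the platform switch doesn't fill.
 * This mirrors how i915_report_and_clear_eir() consumes the array below.
 */
static void __always_unused example_dump_instdone(struct drm_device *dev)
{
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	int i;

	i915_get_extra_instdone(dev, instdone);
	for (i = 0; i < I915_NUM_INSTDONE_REG; i++)
		pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
}
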
Chris Wilson3bd3c932010-08-19 08:19:30 +01001510#ifdef CONFIG_DEBUG_FS
Chris Wilson9df30792010-02-18 10:24:56 +00001511static struct drm_i915_error_object *
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001512i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1513 struct drm_i915_gem_object *src,
1514 const int num_pages)
Chris Wilson9df30792010-02-18 10:24:56 +00001515{
1516 struct drm_i915_error_object *dst;
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001517 int i;
Chris Wilsone56660d2010-08-07 11:01:26 +01001518 u32 reloc_offset;
Chris Wilson9df30792010-02-18 10:24:56 +00001519
Chris Wilson05394f32010-11-08 19:18:58 +00001520 if (src == NULL || src->pages == NULL)
Chris Wilson9df30792010-02-18 10:24:56 +00001521 return NULL;
1522
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001523 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
Chris Wilson9df30792010-02-18 10:24:56 +00001524 if (dst == NULL)
1525 return NULL;
1526
Chris Wilson05394f32010-11-08 19:18:58 +00001527 reloc_offset = src->gtt_offset;
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001528 for (i = 0; i < num_pages; i++) {
Andrew Morton788885a2010-05-11 14:07:05 -07001529 unsigned long flags;
Chris Wilsone56660d2010-08-07 11:01:26 +01001530 void *d;
Andrew Morton788885a2010-05-11 14:07:05 -07001531
Chris Wilsone56660d2010-08-07 11:01:26 +01001532 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
Chris Wilson9df30792010-02-18 10:24:56 +00001533 if (d == NULL)
1534 goto unwind;
Chris Wilsone56660d2010-08-07 11:01:26 +01001535
Andrew Morton788885a2010-05-11 14:07:05 -07001536 local_irq_save(flags);
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001537 if (reloc_offset < dev_priv->gtt.mappable_end &&
Daniel Vetter74898d72012-02-15 23:50:22 +01001538 src->has_global_gtt_mapping) {
Chris Wilson172975aa2011-12-14 13:57:25 +01001539 void __iomem *s;
1540
1541 /* Simply ignore tiling or any overlapping fence.
1542 * It's part of the error state, and this hopefully
1543 * captures what the GPU read.
1544 */
1545
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001546 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
Chris Wilson172975aa2011-12-14 13:57:25 +01001547 reloc_offset);
1548 memcpy_fromio(d, s, PAGE_SIZE);
1549 io_mapping_unmap_atomic(s);
Chris Wilson960e3562012-11-15 11:32:23 +00001550 } else if (src->stolen) {
1551 unsigned long offset;
1552
1553 offset = dev_priv->mm.stolen_base;
1554 offset += src->stolen->start;
1555 offset += i << PAGE_SHIFT;
1556
Daniel Vetter1a240d42012-11-29 22:18:51 +01001557 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
Chris Wilson172975aa2011-12-14 13:57:25 +01001558 } else {
Chris Wilson9da3da62012-06-01 15:20:22 +01001559 struct page *page;
Chris Wilson172975aa2011-12-14 13:57:25 +01001560 void *s;
1561
Chris Wilson9da3da62012-06-01 15:20:22 +01001562 page = i915_gem_object_get_page(src, i);
Chris Wilson172975aa2011-12-14 13:57:25 +01001563
Chris Wilson9da3da62012-06-01 15:20:22 +01001564 drm_clflush_pages(&page, 1);
1565
1566 s = kmap_atomic(page);
Chris Wilson172975aa2011-12-14 13:57:25 +01001567 memcpy(d, s, PAGE_SIZE);
1568 kunmap_atomic(s);
1569
Chris Wilson9da3da62012-06-01 15:20:22 +01001570 drm_clflush_pages(&page, 1);
Chris Wilson172975aa2011-12-14 13:57:25 +01001571 }
Andrew Morton788885a2010-05-11 14:07:05 -07001572 local_irq_restore(flags);
Chris Wilsone56660d2010-08-07 11:01:26 +01001573
Chris Wilson9da3da62012-06-01 15:20:22 +01001574 dst->pages[i] = d;
Chris Wilsone56660d2010-08-07 11:01:26 +01001575
1576 reloc_offset += PAGE_SIZE;
Chris Wilson9df30792010-02-18 10:24:56 +00001577 }
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001578 dst->page_count = num_pages;
Chris Wilson05394f32010-11-08 19:18:58 +00001579 dst->gtt_offset = src->gtt_offset;
Chris Wilson9df30792010-02-18 10:24:56 +00001580
1581 return dst;
1582
1583unwind:
Chris Wilson9da3da62012-06-01 15:20:22 +01001584 while (i--)
1585 kfree(dst->pages[i]);
Chris Wilson9df30792010-02-18 10:24:56 +00001586 kfree(dst);
1587 return NULL;
1588}
Ben Widawskyd0d045e2013-02-24 18:10:00 -08001589#define i915_error_object_create(dev_priv, src) \
1590 i915_error_object_create_sized((dev_priv), (src), \
1591 (src)->base.size>>PAGE_SHIFT)
Chris Wilson9df30792010-02-18 10:24:56 +00001592
1593static void
1594i915_error_object_free(struct drm_i915_error_object *obj)
1595{
1596 int page;
1597
1598 if (obj == NULL)
1599 return;
1600
1601 for (page = 0; page < obj->page_count; page++)
1602 kfree(obj->pages[page]);
1603
1604 kfree(obj);
1605}
1606
Daniel Vetter742cbee2012-04-27 15:17:39 +02001607void
1608i915_error_state_free(struct kref *error_ref)
Chris Wilson9df30792010-02-18 10:24:56 +00001609{
Daniel Vetter742cbee2012-04-27 15:17:39 +02001610 struct drm_i915_error_state *error = container_of(error_ref,
1611 typeof(*error), ref);
Chris Wilsone2f973d2011-01-27 19:15:11 +00001612 int i;
1613
Chris Wilson52d39a22012-02-15 11:25:37 +00001614 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1615 i915_error_object_free(error->ring[i].batchbuffer);
1616 i915_error_object_free(error->ring[i].ringbuffer);
Ben Widawsky7ed73da2013-05-25 14:42:54 -07001617 i915_error_object_free(error->ring[i].ctx);
Chris Wilson52d39a22012-02-15 11:25:37 +00001618 kfree(error->ring[i].requests);
1619 }
Chris Wilsone2f973d2011-01-27 19:15:11 +00001620
Chris Wilson9df30792010-02-18 10:24:56 +00001621 kfree(error->active_bo);
Chris Wilson6ef3d422010-08-04 20:26:07 +01001622 kfree(error->overlay);
Ben Widawsky7ed73da2013-05-25 14:42:54 -07001623 kfree(error->display);
Chris Wilson9df30792010-02-18 10:24:56 +00001624 kfree(error);
1625}
Chris Wilson1b502472012-04-24 15:47:30 +01001626static void capture_bo(struct drm_i915_error_buffer *err,
1627 struct drm_i915_gem_object *obj)
1628{
1629 err->size = obj->base.size;
1630 err->name = obj->base.name;
Chris Wilson0201f1e2012-07-20 12:41:01 +01001631 err->rseqno = obj->last_read_seqno;
1632 err->wseqno = obj->last_write_seqno;
Chris Wilson1b502472012-04-24 15:47:30 +01001633 err->gtt_offset = obj->gtt_offset;
1634 err->read_domains = obj->base.read_domains;
1635 err->write_domain = obj->base.write_domain;
1636 err->fence_reg = obj->fence_reg;
1637 err->pinned = 0;
1638 if (obj->pin_count > 0)
1639 err->pinned = 1;
1640 if (obj->user_pin_count > 0)
1641 err->pinned = -1;
1642 err->tiling = obj->tiling_mode;
1643 err->dirty = obj->dirty;
1644 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1645 err->ring = obj->ring ? obj->ring->id : -1;
1646 err->cache_level = obj->cache_level;
1647}
Chris Wilson9df30792010-02-18 10:24:56 +00001648
Chris Wilson1b502472012-04-24 15:47:30 +01001649static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1650 int count, struct list_head *head)
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001651{
1652 struct drm_i915_gem_object *obj;
1653 int i = 0;
1654
1655 list_for_each_entry(obj, head, mm_list) {
Chris Wilson1b502472012-04-24 15:47:30 +01001656 capture_bo(err++, obj);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001657 if (++i == count)
1658 break;
Chris Wilson1b502472012-04-24 15:47:30 +01001659 }
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001660
Chris Wilson1b502472012-04-24 15:47:30 +01001661 return i;
1662}
1663
1664static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1665 int count, struct list_head *head)
1666{
1667 struct drm_i915_gem_object *obj;
1668 int i = 0;
1669
Ben Widawsky35c20a62013-05-31 11:28:48 -07001670 list_for_each_entry(obj, head, global_list) {
Chris Wilson1b502472012-04-24 15:47:30 +01001671 if (obj->pin_count == 0)
1672 continue;
1673
1674 capture_bo(err++, obj);
1675 if (++i == count)
1676 break;
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001677 }
1678
1679 return i;
1680}
1681
Chris Wilson748ebc62010-10-24 10:28:47 +01001682static void i915_gem_record_fences(struct drm_device *dev,
1683 struct drm_i915_error_state *error)
1684{
1685 struct drm_i915_private *dev_priv = dev->dev_private;
1686 int i;
1687
1688 /* Fences */
1689 switch (INTEL_INFO(dev)->gen) {
Daniel Vetter775d17b2011-10-09 21:52:01 +02001690 case 7:
Chris Wilson748ebc62010-10-24 10:28:47 +01001691 case 6:
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03001692 for (i = 0; i < dev_priv->num_fence_regs; i++)
Chris Wilson748ebc62010-10-24 10:28:47 +01001693 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1694 break;
1695 case 5:
1696 case 4:
1697 for (i = 0; i < 16; i++)
1698 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1699 break;
1700 case 3:
1701 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1702 for (i = 0; i < 8; i++)
1703 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
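		/* fall through: fences 0-7 use the gen2 register layout */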
1704 case 2:
1705 for (i = 0; i < 8; i++)
1706 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1707 break;
1708
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08001709 default:
1710 BUG();
Chris Wilson748ebc62010-10-24 10:28:47 +01001711 }
1712}
1713
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001714static struct drm_i915_error_object *
1715i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1716 struct intel_ring_buffer *ring)
1717{
1718 struct drm_i915_gem_object *obj;
1719 u32 seqno;
1720
1721 if (!ring->get_seqno)
1722 return NULL;
1723
Daniel Vetterb45305f2012-12-17 16:21:27 +01001724 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1725 u32 acthd = I915_READ(ACTHD);
1726
1727 if (WARN_ON(ring->id != RCS))
1728 return NULL;
1729
1730 obj = ring->private;
1731 if (acthd >= obj->gtt_offset &&
1732 acthd < obj->gtt_offset + obj->base.size)
1733 return i915_error_object_create(dev_priv, obj);
1734 }
1735
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01001736 seqno = ring->get_seqno(ring, false);
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001737 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1738 if (obj->ring != ring)
1739 continue;
1740
Chris Wilson0201f1e2012-07-20 12:41:01 +01001741 if (i915_seqno_passed(seqno, obj->last_read_seqno))
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001742 continue;
1743
1744 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1745 continue;
1746
1747 /* We need to copy these to an anonymous buffer, as that is the
1748 * simplest way to avoid them being overwritten by userspace.
1749 */
1750 return i915_error_object_create(dev_priv, obj);
1751 }
1752
1753 return NULL;
1754}
1755
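/*
 * Illustrative sketch (condition lifted from the HAS_BROKEN_CS_TLB
 * branch above): does ACTHD point inside a given object's GTT range,
 * i.e. could the GPU have been fetching commands from this buffer?
 */
static bool __always_unused
example_acthd_inside(u32 acthd, struct drm_i915_gem_object *obj)
{
	return acthd >= obj->gtt_offset &&
	       acthd < obj->gtt_offset + obj->base.size;
}
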
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001756static void i915_record_ring_state(struct drm_device *dev,
1757 struct drm_i915_error_state *error,
1758 struct intel_ring_buffer *ring)
1759{
1760 struct drm_i915_private *dev_priv = dev->dev_private;
1761
Daniel Vetter33f3f512011-12-14 13:57:39 +01001762 if (INTEL_INFO(dev)->gen >= 6) {
Chris Wilson12f55812012-07-05 17:14:01 +01001763 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
Daniel Vetter33f3f512011-12-14 13:57:39 +01001764 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
Daniel Vetter7e3b8732012-02-01 22:26:45 +01001765 error->semaphore_mboxes[ring->id][0]
1766 = I915_READ(RING_SYNC_0(ring->mmio_base));
1767 error->semaphore_mboxes[ring->id][1]
1768 = I915_READ(RING_SYNC_1(ring->mmio_base));
Chris Wilsondf2b23d2012-11-27 17:06:54 +00001769 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1770 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
Daniel Vetter33f3f512011-12-14 13:57:39 +01001771 }
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001772
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001773 if (INTEL_INFO(dev)->gen >= 4) {
Daniel Vetter9d2f41f2012-04-02 21:41:45 +02001774 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001775 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1776 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1777 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001778 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
Ben Widawsky050ee912012-08-22 11:32:15 -07001779 if (ring->id == RCS)
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001780 error->bbaddr = I915_READ64(BB_ADDR);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001781 } else {
Daniel Vetter9d2f41f2012-04-02 21:41:45 +02001782 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001783 error->ipeir[ring->id] = I915_READ(IPEIR);
1784 error->ipehr[ring->id] = I915_READ(IPEHR);
1785 error->instdone[ring->id] = I915_READ(INSTDONE);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001786 }
1787
Ben Widawsky9574b3f2012-04-26 16:03:01 -07001788 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001789 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01001790 error->seqno[ring->id] = ring->get_seqno(ring, false);
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001791 error->acthd[ring->id] = intel_ring_get_active_head(ring);
Daniel Vetterc1cd90e2011-12-14 13:57:02 +01001792 error->head[ring->id] = I915_READ_HEAD(ring);
1793 error->tail[ring->id] = I915_READ_TAIL(ring);
Chris Wilson0f3b6842013-01-15 12:05:55 +00001794 error->ctl[ring->id] = I915_READ_CTL(ring);
Daniel Vetter7e3b8732012-02-01 22:26:45 +01001795
1796 error->cpu_ring_head[ring->id] = ring->head;
1797 error->cpu_ring_tail[ring->id] = ring->tail;
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001798}
1799
Ben Widawsky8c123e52013-03-04 17:00:29 -08001800
1801static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1802 struct drm_i915_error_state *error,
1803 struct drm_i915_error_ring *ering)
1804{
1805 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1806 struct drm_i915_gem_object *obj;
1807
1808 /* Currently render ring is the only HW context user */
1809 if (ring->id != RCS || !error->ccid)
1810 return;
1811
Ben Widawsky35c20a62013-05-31 11:28:48 -07001812 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
Ben Widawsky8c123e52013-03-04 17:00:29 -08001813 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1814 ering->ctx = i915_error_object_create_sized(dev_priv,
1815 obj, 1);
1816 }
1817 }
1818}
1819
Chris Wilson52d39a22012-02-15 11:25:37 +00001820static void i915_gem_record_rings(struct drm_device *dev,
1821 struct drm_i915_error_state *error)
1822{
1823 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01001824 struct intel_ring_buffer *ring;
Chris Wilson52d39a22012-02-15 11:25:37 +00001825 struct drm_i915_gem_request *request;
1826 int i, count;
1827
Chris Wilsonb4519512012-05-11 14:29:30 +01001828 for_each_ring(ring, dev_priv, i) {
Chris Wilson52d39a22012-02-15 11:25:37 +00001829 i915_record_ring_state(dev, error, ring);
1830
1831 error->ring[i].batchbuffer =
1832 i915_error_first_batchbuffer(dev_priv, ring);
1833
1834 error->ring[i].ringbuffer =
1835 i915_error_object_create(dev_priv, ring->obj);
1836
Ben Widawsky8c123e52013-03-04 17:00:29 -08001837
1838 i915_gem_record_active_context(ring, error, &error->ring[i]);
1839
Chris Wilson52d39a22012-02-15 11:25:37 +00001840 count = 0;
1841 list_for_each_entry(request, &ring->request_list, list)
1842 count++;
1843
1844 error->ring[i].num_requests = count;
1845 error->ring[i].requests =
1846 kmalloc(count*sizeof(struct drm_i915_error_request),
1847 GFP_ATOMIC);
1848 if (error->ring[i].requests == NULL) {
1849 error->ring[i].num_requests = 0;
1850 continue;
1851 }
1852
1853 count = 0;
1854 list_for_each_entry(request, &ring->request_list, list) {
1855 struct drm_i915_error_request *erq;
1856
1857 erq = &error->ring[i].requests[count++];
1858 erq->seqno = request->seqno;
1859 erq->jiffies = request->emitted_jiffies;
Chris Wilsonee4f42b2012-02-15 11:25:38 +00001860 erq->tail = request->tail;
Chris Wilson52d39a22012-02-15 11:25:37 +00001861 }
1862 }
1863}
1864
Jesse Barnes8a905232009-07-11 16:48:03 -04001865/**
1866 * i915_capture_error_state - capture an error record for later analysis
1867 * @dev: drm device
1868 *
1869 * Should be called when an error is detected (either a hang or an error
1870 * interrupt) to capture error state from the time of the error. Fills
1871 * out a structure which becomes available in debugfs for user level tools
1872 * to pick up.
1873 */
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001874static void i915_capture_error_state(struct drm_device *dev)
1875{
1876 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001877 struct drm_i915_gem_object *obj;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001878 struct drm_i915_error_state *error;
1879 unsigned long flags;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001880 int i, pipe;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001881
Daniel Vetter99584db2012-11-14 17:14:04 +01001882 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1883 error = dev_priv->gpu_error.first_error;
1884 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
Chris Wilson9df30792010-02-18 10:24:56 +00001885 if (error)
1886 return;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001887
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001888 /* Account for pipe specific data like PIPE*STAT */
Daniel Vetter33f3f512011-12-14 13:57:39 +01001889 error = kzalloc(sizeof(*error), GFP_ATOMIC);
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001890 if (!error) {
Chris Wilson9df30792010-02-18 10:24:56 +00001891 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1892 return;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001893 }
1894
Paulo Zanoni5d83d292013-03-06 20:03:22 -03001895 DRM_INFO("capturing error event; look for more information in "
Ben Widawsky2f86f192013-01-28 15:32:15 -08001896 "/sys/kernel/debug/dri/%d/i915_error_state\n",
Chris Wilsonb6f78332011-02-01 14:15:55 +00001897 dev->primary->index);
Chris Wilson2fa772f2010-10-01 13:23:27 +01001898
Daniel Vetter742cbee2012-04-27 15:17:39 +02001899 kref_init(&error->ref);
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001900 error->eir = I915_READ(EIR);
1901 error->pgtbl_er = I915_READ(PGTBL_ER);
Ben Widawsky211816e2013-02-24 18:10:01 -08001902 if (HAS_HW_CONTEXTS(dev))
1903 error->ccid = I915_READ(CCID);
Ben Widawskybe998e22012-04-26 16:03:00 -07001904
1905 if (HAS_PCH_SPLIT(dev))
1906 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1907 else if (IS_VALLEYVIEW(dev))
1908 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1909 else if (IS_GEN2(dev))
1910 error->ier = I915_READ16(IER);
1911 else
1912 error->ier = I915_READ(IER);
1913
Chris Wilson0f3b6842013-01-15 12:05:55 +00001914 if (INTEL_INFO(dev)->gen >= 6)
1915 error->derrmr = I915_READ(DERRMR);
1916
1917 if (IS_VALLEYVIEW(dev))
1918 error->forcewake = I915_READ(FORCEWAKE_VLV);
1919 else if (INTEL_INFO(dev)->gen >= 7)
1920 error->forcewake = I915_READ(FORCEWAKE_MT);
1921 else if (INTEL_INFO(dev)->gen == 6)
1922 error->forcewake = I915_READ(FORCEWAKE);
1923
Paulo Zanoni4f3308b2013-03-22 14:24:16 -03001924 if (!HAS_PCH_SPLIT(dev))
1925 for_each_pipe(pipe)
1926 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
Daniel Vetterd27b1e02011-12-14 13:57:01 +01001927
Daniel Vetter33f3f512011-12-14 13:57:39 +01001928 if (INTEL_INFO(dev)->gen >= 6) {
Chris Wilsonf4068392010-10-27 20:36:41 +01001929 error->error = I915_READ(ERROR_GEN6);
Daniel Vetter33f3f512011-12-14 13:57:39 +01001930 error->done_reg = I915_READ(DONE_REG);
1931 }
Chris Wilsonadd354d2010-10-29 19:00:51 +01001932
Ben Widawsky71e172e2012-08-20 16:15:13 -07001933 if (INTEL_INFO(dev)->gen == 7)
1934 error->err_int = I915_READ(GEN7_ERR_INT);
1935
Ben Widawsky050ee912012-08-22 11:32:15 -07001936 i915_get_extra_instdone(dev, error->extra_instdone);
1937
Chris Wilson748ebc62010-10-24 10:28:47 +01001938 i915_gem_record_fences(dev, error);
Chris Wilson52d39a22012-02-15 11:25:37 +00001939 i915_gem_record_rings(dev, error);
Chris Wilson9df30792010-02-18 10:24:56 +00001940
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001941 /* Record buffers on the active and pinned lists. */
Chris Wilson9df30792010-02-18 10:24:56 +00001942 error->active_bo = NULL;
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001943 error->pinned_bo = NULL;
Chris Wilson9df30792010-02-18 10:24:56 +00001944
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001945 i = 0;
1946 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1947 i++;
1948 error->active_bo_count = i;
Ben Widawsky35c20a62013-05-31 11:28:48 -07001949 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
Chris Wilson1b502472012-04-24 15:47:30 +01001950 if (obj->pin_count)
1951 i++;
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001952 error->pinned_bo_count = i - error->active_bo_count;
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001953
Chris Wilson8e934db2011-01-24 12:34:00 +00001954 error->active_bo = NULL;
1955 error->pinned_bo = NULL;
Chris Wilsonbcfb2e22011-01-07 21:06:07 +00001956 if (i) {
1957 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
Chris Wilson9df30792010-02-18 10:24:56 +00001958 GFP_ATOMIC);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001959 if (error->active_bo)
1960 error->pinned_bo =
1961 error->active_bo + error->active_bo_count;
Jesse Barnes63eeaf32009-06-18 16:56:52 -07001962 }
1963
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001964 if (error->active_bo)
1965 error->active_bo_count =
Chris Wilson1b502472012-04-24 15:47:30 +01001966 capture_active_bo(error->active_bo,
1967 error->active_bo_count,
1968 &dev_priv->mm.active_list);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001969
1970 if (error->pinned_bo)
1971 error->pinned_bo_count =
Chris Wilson1b502472012-04-24 15:47:30 +01001972 capture_pinned_bo(error->pinned_bo,
1973 error->pinned_bo_count,
Chris Wilson6c085a72012-08-20 11:40:46 +02001974 &dev_priv->mm.bound_list);
Chris Wilsonc724e8a2010-11-22 08:07:02 +00001975
Jesse Barnes8a905232009-07-11 16:48:03 -04001976 do_gettimeofday(&error->time);
1977
Chris Wilson6ef3d422010-08-04 20:26:07 +01001978 error->overlay = intel_overlay_capture_error_state(dev);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +00001979 error->display = intel_display_capture_error_state(dev);
Chris Wilson6ef3d422010-08-04 20:26:07 +01001980
Daniel Vetter99584db2012-11-14 17:14:04 +01001981 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1982 if (dev_priv->gpu_error.first_error == NULL) {
1983 dev_priv->gpu_error.first_error = error;
Chris Wilson9df30792010-02-18 10:24:56 +00001984 error = NULL;
1985 }
Daniel Vetter99584db2012-11-14 17:14:04 +01001986 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
Chris Wilson9df30792010-02-18 10:24:56 +00001987
1988 if (error)
Daniel Vetter742cbee2012-04-27 15:17:39 +02001989 i915_error_state_free(&error->ref);
Chris Wilson9df30792010-02-18 10:24:56 +00001990}
1991
1992void i915_destroy_error_state(struct drm_device *dev)
1993{
1994 struct drm_i915_private *dev_priv = dev->dev_private;
1995 struct drm_i915_error_state *error;
Ben Widawsky6dc0e812012-01-23 15:30:02 -08001996 unsigned long flags;
Chris Wilson9df30792010-02-18 10:24:56 +00001997
Daniel Vetter99584db2012-11-14 17:14:04 +01001998 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1999 error = dev_priv->gpu_error.first_error;
2000 dev_priv->gpu_error.first_error = NULL;
2001 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
Chris Wilson9df30792010-02-18 10:24:56 +00002002
2003 if (error)
Daniel Vetter742cbee2012-04-27 15:17:39 +02002004 kref_put(&error->ref, i915_error_state_free);
Jesse Barnes63eeaf32009-06-18 16:56:52 -07002005}
Chris Wilson3bd3c932010-08-19 08:19:30 +01002006#else
2007#define i915_capture_error_state(x)
2008#endif
Jesse Barnes63eeaf32009-06-18 16:56:52 -07002009
Chris Wilson35aed2e2010-05-27 13:18:12 +01002010static void i915_report_and_clear_eir(struct drm_device *dev)
Jesse Barnes8a905232009-07-11 16:48:03 -04002011{
2012 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybd9854f2012-08-23 15:18:09 -07002013 uint32_t instdone[I915_NUM_INSTDONE_REG];
Jesse Barnes8a905232009-07-11 16:48:03 -04002014 u32 eir = I915_READ(EIR);
Ben Widawsky050ee912012-08-22 11:32:15 -07002015 int pipe, i;
Jesse Barnes8a905232009-07-11 16:48:03 -04002016
Chris Wilson35aed2e2010-05-27 13:18:12 +01002017 if (!eir)
2018 return;
Jesse Barnes8a905232009-07-11 16:48:03 -04002019
Joe Perchesa70491c2012-03-18 13:00:11 -07002020 pr_err("render error detected, EIR: 0x%08x\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002021
Ben Widawskybd9854f2012-08-23 15:18:09 -07002022 i915_get_extra_instdone(dev, instdone);
2023
Jesse Barnes8a905232009-07-11 16:48:03 -04002024 if (IS_G4X(dev)) {
2025 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2026 u32 ipeir = I915_READ(IPEIR_I965);
2027
Joe Perchesa70491c2012-03-18 13:00:11 -07002028 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2029 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Ben Widawsky050ee912012-08-22 11:32:15 -07002030 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2031 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Joe Perchesa70491c2012-03-18 13:00:11 -07002032 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002033 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002034 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002035 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002036 }
2037 if (eir & GM45_ERROR_PAGE_TABLE) {
2038 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002039 pr_err("page table error\n");
2040 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002041 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002042 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002043 }
2044 }
2045
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002046 if (!IS_GEN2(dev)) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002047 if (eir & I915_ERROR_PAGE_TABLE) {
2048 u32 pgtbl_err = I915_READ(PGTBL_ER);
Joe Perchesa70491c2012-03-18 13:00:11 -07002049 pr_err("page table error\n");
2050 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
Jesse Barnes8a905232009-07-11 16:48:03 -04002051 I915_WRITE(PGTBL_ER, pgtbl_err);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002052 POSTING_READ(PGTBL_ER);
Jesse Barnes8a905232009-07-11 16:48:03 -04002053 }
2054 }
2055
2056 if (eir & I915_ERROR_MEMORY_REFRESH) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002057 pr_err("memory refresh error:\n");
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002058 for_each_pipe(pipe)
Joe Perchesa70491c2012-03-18 13:00:11 -07002059 pr_err("pipe %c stat: 0x%08x\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002060 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
Jesse Barnes8a905232009-07-11 16:48:03 -04002061 /* pipestat has already been acked */
2062 }
2063 if (eir & I915_ERROR_INSTRUCTION) {
Joe Perchesa70491c2012-03-18 13:00:11 -07002064 pr_err("instruction error\n");
2065 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
Ben Widawsky050ee912012-08-22 11:32:15 -07002066 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2067 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002068 if (INTEL_INFO(dev)->gen < 4) {
Jesse Barnes8a905232009-07-11 16:48:03 -04002069 u32 ipeir = I915_READ(IPEIR);
2070
Joe Perchesa70491c2012-03-18 13:00:11 -07002071 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2072 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
Joe Perchesa70491c2012-03-18 13:00:11 -07002073 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
Jesse Barnes8a905232009-07-11 16:48:03 -04002074 I915_WRITE(IPEIR, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002075 POSTING_READ(IPEIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002076 } else {
2077 u32 ipeir = I915_READ(IPEIR_I965);
2078
Joe Perchesa70491c2012-03-18 13:00:11 -07002079 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2080 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
Joe Perchesa70491c2012-03-18 13:00:11 -07002081 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
Joe Perchesa70491c2012-03-18 13:00:11 -07002082 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002083 I915_WRITE(IPEIR_I965, ipeir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002084 POSTING_READ(IPEIR_I965);
Jesse Barnes8a905232009-07-11 16:48:03 -04002085 }
2086 }
2087
2088 I915_WRITE(EIR, eir);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002089 POSTING_READ(EIR);
Jesse Barnes8a905232009-07-11 16:48:03 -04002090 eir = I915_READ(EIR);
2091 if (eir) {
2092 /*
2093 * some errors might have become stuck,
2094 * mask them.
2095 */
2096 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2097 I915_WRITE(EMR, I915_READ(EMR) | eir);
2098 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2099 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002100}
2101
2102/**
2103 * i915_handle_error - handle an error interrupt
2104 * @dev: drm device
2105 *
2106 * Do some basic checking of register state at error interrupt time and
2107 * dump it to the syslog. Also call i915_capture_error_state() to make
2108 * sure we get a record and make it available in debugfs. Fire a uevent
2109 * so userspace knows something bad happened (should trigger collection
2110 * of a ring dump etc.).
2111 */
Chris Wilson527f9e92010-11-11 01:16:58 +00002112void i915_handle_error(struct drm_device *dev, bool wedged)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002113{
2114 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002115 struct intel_ring_buffer *ring;
2116 int i;
Chris Wilson35aed2e2010-05-27 13:18:12 +01002117
2118 i915_capture_error_state(dev);
2119 i915_report_and_clear_eir(dev);
Jesse Barnes8a905232009-07-11 16:48:03 -04002120
Ben Gamariba1234d2009-09-14 17:48:47 -04002121 if (wedged) {
Daniel Vetterf69061b2012-12-06 09:01:42 +01002122 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2123 &dev_priv->gpu_error.reset_counter);
Ben Gamariba1234d2009-09-14 17:48:47 -04002124
Ben Gamari11ed50e2009-09-14 17:48:45 -04002125 /*
Daniel Vetter1f83fee2012-11-15 17:17:22 +01002126 * Wakeup waiting processes so that the reset work item
2127 * doesn't deadlock trying to grab various locks.
Ben Gamari11ed50e2009-09-14 17:48:45 -04002128 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002129 for_each_ring(ring, dev_priv, i)
2130 wake_up_all(&ring->irq_queue);
Ben Gamari11ed50e2009-09-14 17:48:45 -04002131 }
2132
Daniel Vetter99584db2012-11-14 17:14:04 +01002133 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
Jesse Barnes8a905232009-07-11 16:48:03 -04002134}
2135
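/*
 * Illustrative usage sketch (hypothetical caller): every detector that
 * concludes the GPU is dead reports it the same way. Passing
 * wedged=true sets I915_RESET_IN_PROGRESS_FLAG and wakes all waiters so
 * the error work item above can reset the chip without deadlocking.
 */
static void __always_unused example_declare_gpu_hung(struct drm_device *dev)
{
	i915_handle_error(dev, true);
}
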
Ville Syrjälä21ad8332013-02-19 15:16:39 +02002136static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002137{
2138 drm_i915_private_t *dev_priv = dev->dev_private;
2139 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Chris Wilson05394f32010-11-08 19:18:58 +00002141 struct drm_i915_gem_object *obj;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002142 struct intel_unpin_work *work;
2143 unsigned long flags;
2144 bool stall_detected;
2145
2146 /* Ignore early vblank irqs */
2147 if (intel_crtc == NULL)
2148 return;
2149
2150 spin_lock_irqsave(&dev->event_lock, flags);
2151 work = intel_crtc->unpin_work;
2152
Chris Wilsone7d841c2012-12-03 11:36:30 +00002153 if (work == NULL ||
2154 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2155 !work->enable_stall_check) {
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002156 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2157 spin_unlock_irqrestore(&dev->event_lock, flags);
2158 return;
2159 }
2160
2161 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
Chris Wilson05394f32010-11-08 19:18:58 +00002162 obj = work->pending_flip_obj;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01002163 if (INTEL_INFO(dev)->gen >= 4) {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002164 int dspsurf = DSPSURF(intel_crtc->plane);
Armin Reese446f2542012-03-30 16:20:16 -07002165 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2166 obj->gtt_offset;
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002167 } else {
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002168 int dspaddr = DSPADDR(intel_crtc->plane);
Chris Wilson05394f32010-11-08 19:18:58 +00002169 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
Ville Syrjälä01f2c772011-12-20 00:06:49 +02002170 crtc->y * crtc->fb->pitches[0] +
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01002171 crtc->x * crtc->fb->bits_per_pixel/8);
2172 }
2173
2174 spin_unlock_irqrestore(&dev->event_lock, flags);
2175
2176 if (stall_detected) {
2177 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2178 intel_prepare_page_flip(dev, intel_crtc->plane);
2179 }
2180}
2181
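/*
 * Illustrative sketch (formula taken from the pre-gen4 branch above):
 * the linear scanout address DSPADDR should hold once a flip to 'obj'
 * has actually latched.
 */
static u32 __always_unused
example_expected_dspaddr(struct drm_crtc *crtc,
			 struct drm_i915_gem_object *obj)
{
	return obj->gtt_offset +
	       crtc->y * crtc->fb->pitches[0] +
	       crtc->x * crtc->fb->bits_per_pixel / 8;
}
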
Keith Packard42f52ef2008-10-18 19:39:29 -07002182/* Called from drm generic code, passed 'crtc' which
2183 * we use as a pipe index
2184 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002185static int i915_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002186{
2187 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002188 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002189
Chris Wilson5eddb702010-09-11 13:48:45 +01002190 if (!i915_pipe_enabled(dev, pipe))
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002191 return -EINVAL;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002192
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002193 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002194 if (INTEL_INFO(dev)->gen >= 4)
Keith Packard7c463582008-11-04 02:03:27 -08002195 i915_enable_pipestat(dev_priv, pipe,
2196 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Keith Packarde9d21d72008-10-16 11:31:38 -07002197 else
Keith Packard7c463582008-11-04 02:03:27 -08002198 i915_enable_pipestat(dev_priv, pipe,
2199 PIPE_VBLANK_INTERRUPT_ENABLE);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002200
2201 /* maintain vblank delivery even in deep C-states */
2202 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002203 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002204 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002205
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002206 return 0;
2207}
2208
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002209static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002210{
2211 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2212 unsigned long irqflags;
2213
2214 if (!i915_pipe_enabled(dev, pipe))
2215 return -EINVAL;
2216
2217 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2218 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
Akshay Joshi0206e352011-08-16 15:34:10 -04002219 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002220 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2221
2222 return 0;
2223}
2224
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002225static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002226{
2227 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2228 unsigned long irqflags;
2229
2230 if (!i915_pipe_enabled(dev, pipe))
2231 return -EINVAL;
2232
2233 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilsonb615b572012-05-02 09:52:12 +01002234 ironlake_enable_display_irq(dev_priv,
2235 DE_PIPEA_VBLANK_IVB << (5 * pipe));
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002236 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2237
2238 return 0;
2239}
2240
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002241static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2242{
2243 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2244 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002245 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002246
2247 if (!i915_pipe_enabled(dev, pipe))
2248 return -EINVAL;
2249
2250 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002251 imr = I915_READ(VLV_IMR);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002252 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002253 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002254 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002255 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002256 I915_WRITE(VLV_IMR, imr);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002257 i915_enable_pipestat(dev_priv, pipe,
2258 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002259 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2260
2261 return 0;
2262}
2263
Keith Packard42f52ef2008-10-18 19:39:29 -07002264/* Called from drm generic code, passed 'crtc' which
2265 * we use as a pipe index
2266 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002267static void i915_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002268{
2269 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Keith Packarde9d21d72008-10-16 11:31:38 -07002270 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002271
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002272 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002273 if (dev_priv->info->gen == 3)
Daniel Vetter6b26c862012-04-24 14:04:12 +02002274 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
Chris Wilson8692d00e2011-02-05 10:08:21 +00002275
Jesse Barnesf796cf82011-04-07 13:58:17 -07002276 i915_disable_pipestat(dev_priv, pipe,
2277 PIPE_VBLANK_INTERRUPT_ENABLE |
2278 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2279 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2280}
2281
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002282static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002283{
2284 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2285 unsigned long irqflags;
2286
2287 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2288 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
Akshay Joshi0206e352011-08-16 15:34:10 -04002289 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002290 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002291}
2292
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002293static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002294{
2295 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2296 unsigned long irqflags;
2297
2298 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilsonb615b572012-05-02 09:52:12 +01002299 ironlake_disable_display_irq(dev_priv,
2300 DE_PIPEA_VBLANK_IVB << (pipe * 5));
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002301 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2302}
2303
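/*
 * Illustrative sketch (derived from the shifts above): IVB groups its
 * display engine interrupt bits in strides of 5 per pipe, so pipe N's
 * vblank bit is just the pipe A bit shifted by 5 * N.
 */
static u32 __always_unused example_ivb_vblank_bit(int pipe)
{
	return DE_PIPEA_VBLANK_IVB << (5 * pipe);
}
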
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002304static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2305{
2306 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2307 unsigned long irqflags;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002308 u32 imr;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002309
2310 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002311 i915_disable_pipestat(dev_priv, pipe,
2312 PIPE_START_VBLANK_INTERRUPT_ENABLE);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002313 imr = I915_READ(VLV_IMR);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002314 if (pipe == 0)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002315 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002316 else
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002317 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002318 I915_WRITE(VLV_IMR, imr);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002319 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2320}
2321
Chris Wilson893eead2010-10-27 14:44:35 +01002322static u32
2323ring_last_seqno(struct intel_ring_buffer *ring)
Zou Nan hai852835f2010-05-21 09:08:56 +08002324{
Chris Wilson893eead2010-10-27 14:44:35 +01002325 return list_entry(ring->request_list.prev,
2326 struct drm_i915_gem_request, list)->seqno;
2327}
2328
Chris Wilson9107e9d2013-06-10 11:20:20 +01002329static bool
2330ring_idle(struct intel_ring_buffer *ring, u32 seqno)
Chris Wilson893eead2010-10-27 14:44:35 +01002331{
Chris Wilson9107e9d2013-06-10 11:20:20 +01002332 return (list_empty(&ring->request_list) ||
2333 i915_seqno_passed(seqno, ring_last_seqno(ring)));
Ben Gamarif65d9422009-09-14 17:48:44 -04002334}
2335
Chris Wilson6274f212013-06-10 11:20:21 +01002336static struct intel_ring_buffer *
2337semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
Chris Wilsona24a11e2013-03-14 17:52:05 +02002338{
2339 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilson6274f212013-06-10 11:20:21 +01002340 u32 cmd, ipehr, acthd, acthd_min;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002341
2342 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2343 if ((ipehr & ~(0x3 << 16)) !=
2344 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
Chris Wilson6274f212013-06-10 11:20:21 +01002345 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002346
2347 /* ACTHD is likely pointing to the dword after the actual command,
2348 * so scan backwards until we find the MBOX.
2349 */
Chris Wilson6274f212013-06-10 11:20:21 +01002350 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002351 acthd_min = max((int)acthd - 3 * 4, 0);
2352 do {
2353 cmd = ioread32(ring->virtual_start + acthd);
2354 if (cmd == ipehr)
2355 break;
2356
2357 acthd -= 4;
2358 if (acthd < acthd_min)
Chris Wilson6274f212013-06-10 11:20:21 +01002359 return NULL;
Chris Wilsona24a11e2013-03-14 17:52:05 +02002360 } while (1);
2361
Chris Wilson6274f212013-06-10 11:20:21 +01002362 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
2363 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
Chris Wilsona24a11e2013-03-14 17:52:05 +02002364}
2365
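/*
 * Illustrative sketch (arithmetic copied from the return above; the
 * reading of bit 17 as a ring selector is an assumption): with three
 * rings, the MBOX selector in IPEHR identifies which of the other two
 * rings the waiter is stalled on, as an offset from its own id.
 */
static int __always_unused example_signaller_id(int waiter_id, u32 ipehr)
{
	return (waiter_id + (((ipehr >> 17) & 1) + 1)) % 3;
}
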
Chris Wilson6274f212013-06-10 11:20:21 +01002366static int semaphore_passed(struct intel_ring_buffer *ring)
2367{
2368 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2369 struct intel_ring_buffer *signaller;
2370 u32 seqno, ctl;
2371
2372 ring->hangcheck.deadlock = true;
2373
2374 signaller = semaphore_waits_for(ring, &seqno);
2375 if (signaller == NULL || signaller->hangcheck.deadlock)
2376 return -1;
2377
2378 /* cursory check for an unkickable deadlock */
2379 ctl = I915_READ_CTL(signaller);
2380 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2381 return -1;
2382
2383 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2384}
2385
2386static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2387{
2388 struct intel_ring_buffer *ring;
2389 int i;
2390
2391 for_each_ring(ring, dev_priv, i)
2392 ring->hangcheck.deadlock = false;
2393}
2394
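/*
 * Note on the two helpers above: every hangcheck sweep first clears each
 * ring's hangcheck.deadlock flag, and semaphore_passed() sets its own
 * flag before chasing the signaller chain. If the chain ever revisits a
 * flagged ring, the rings are waiting on each other in a cycle, which is
 * reported as an unkickable deadlock (-1).
 */
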
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002395static enum intel_ring_hangcheck_action
2396ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002397{
2398 struct drm_device *dev = ring->dev;
2399 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002400 u32 tmp;
2401
Chris Wilson6274f212013-06-10 11:20:21 +01002402 if (ring->hangcheck.acthd != acthd)
2403 return active;
2404
Chris Wilson9107e9d2013-06-10 11:20:20 +01002405 if (IS_GEN2(dev))
Chris Wilson6274f212013-06-10 11:20:21 +01002406 return hung;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002407
2408 /* Is the chip hanging on a WAIT_FOR_EVENT?
2409 * If so we can simply poke the RB_WAIT bit
2410 * and break the hang. This should work on
2411 * all but the second generation chipsets.
2412 */
2413 tmp = I915_READ_CTL(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002414 if (tmp & RING_WAIT) {
2415 DRM_ERROR("Kicking stuck wait on %s\n",
2416 ring->name);
2417 I915_WRITE_CTL(ring, tmp);
Chris Wilson6274f212013-06-10 11:20:21 +01002418 return kick;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002419 }
Chris Wilsona24a11e2013-03-14 17:52:05 +02002420
Chris Wilson6274f212013-06-10 11:20:21 +01002421 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2422 switch (semaphore_passed(ring)) {
2423 default:
2424 return hung;
2425 case 1:
2426 DRM_ERROR("Kicking stuck semaphore on %s\n",
2427 ring->name);
2428 I915_WRITE_CTL(ring, tmp);
2429 return kick;
2430 case 0:
2431 return wait;
2432 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002433 }
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002434
Chris Wilson6274f212013-06-10 11:20:21 +01002435 return hung;
Mika Kuoppalaed5cbb02013-05-13 16:32:11 +03002436}
2437
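/*
 * Worked example of the scoring driven by ring_stuck() (values from the
 * BUSY/KICK/HUNG/FIRE defines in i915_hangcheck_elapsed() below): a ring
 * stuck on the same seqno gains 1 per check while still active, 5 each
 * time it needs a kick and 20 when it looks truly hung, so a hard hang
 * crosses the FIRE threshold of 30 within a couple of checks, while a
 * ring that makes progress bleeds one point per check instead.
 */
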
Ben Gamarif65d9422009-09-14 17:48:44 -04002438/**
2439 * This is called when the chip hasn't reported back with completed
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002440 * batchbuffers in a long time. We keep track of per-ring seqno progress,
2441 * and if there is no progress the hangcheck score for that ring is
2442 * increased. Further, acthd is inspected to see if the ring is stuck;
2443 * if it is, we kick the ring. If we see no progress on three subsequent
2444 * calls we assume the chip is wedged and try to fix it by resetting it.
Ben Gamarif65d9422009-09-14 17:48:44 -04002445 */
2446void i915_hangcheck_elapsed(unsigned long data)
2447{
2448 struct drm_device *dev = (struct drm_device *)data;
2449 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002450 struct intel_ring_buffer *ring;
Chris Wilsonb4519512012-05-11 14:29:30 +01002451 int i;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002452 int busy_count = 0, rings_hung = 0;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002453 bool stuck[I915_NUM_RINGS] = { 0 };
2454#define BUSY 1
2455#define KICK 5
2456#define HUNG 20
2457#define FIRE 30
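/* With the increments above, a score climbs past FIRE after two ticks
 * of outright hang (HUNG), seven ticks of repeated kicking (KICK), or a
 * long run of busy ticks (BUSY); ticks that show seqno progress decay
 * the score again.
 */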
Chris Wilson893eead2010-10-27 14:44:35 +01002458
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07002459 if (!i915_enable_hangcheck)
2460 return;
2461
Chris Wilsonb4519512012-05-11 14:29:30 +01002462 for_each_ring(ring, dev_priv, i) {
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002463 u32 seqno, acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002464 bool busy = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002465
Chris Wilson6274f212013-06-10 11:20:21 +01002466 semaphore_clear_deadlocks(dev_priv);
2467
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002468 seqno = ring->get_seqno(ring, false);
2469 acthd = intel_ring_get_active_head(ring);
Chris Wilsond1e61e72012-04-10 17:00:41 +01002470
Chris Wilson9107e9d2013-06-10 11:20:20 +01002471 if (ring->hangcheck.seqno == seqno) {
2472 if (ring_idle(ring, seqno)) {
2473 if (waitqueue_active(&ring->irq_queue)) {
2474 /* Issue a wake-up to catch stuck h/w. */
2475 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2476 ring->name);
2477 wake_up_all(&ring->irq_queue);
2478 ring->hangcheck.score += HUNG;
2479 } else
2480 busy = false;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002481 } else {
Chris Wilson9107e9d2013-06-10 11:20:20 +01002482 int score;
2483
Chris Wilson6274f212013-06-10 11:20:21 +01002484 /* We always increment the hangcheck score
2485 * if the ring is busy and still processing
2486 * the same request, so that no single request
2487 * can run indefinitely (such as a chain of
2488 * batches). The only time we do not increment
2489 * the hangcheck score on this ring is if this
2490 * ring is in a legitimate wait for another
2491 * ring. In that case the waiting ring is a
2492 * victim and we want to be sure we catch the
2493 * right culprit. Then every time we do kick
2494 * the ring, we add a small increment to the
2495 * score so that we can catch a batch that is
2496 * being repeatedly kicked and so responsible
2497 * for stalling the machine.
2498 */
Mika Kuoppalaad8beae2013-06-12 12:35:32 +03002499 ring->hangcheck.action = ring_stuck(ring,
2500 acthd);
2501
2502 switch (ring->hangcheck.action) {
Chris Wilson6274f212013-06-10 11:20:21 +01002503 case wait:
2504 score = 0;
2505 break;
2506 case active:
Chris Wilson9107e9d2013-06-10 11:20:20 +01002507 score = BUSY;
Chris Wilson6274f212013-06-10 11:20:21 +01002508 break;
2509 case kick:
2510 score = KICK;
2511 break;
2512 case hung:
2513 score = HUNG;
2514 stuck[i] = true;
2515 break;
2516 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002517 ring->hangcheck.score += score;
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002518 }
Chris Wilson9107e9d2013-06-10 11:20:20 +01002519 } else {
2520 /* Gradually reduce the count so that we catch DoS
2521 * attempts across multiple batches.
2522 */
2523 if (ring->hangcheck.score > 0)
2524 ring->hangcheck.score--;
Chris Wilsond1e61e72012-04-10 17:00:41 +01002525 }
2526
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002527 ring->hangcheck.seqno = seqno;
2528 ring->hangcheck.acthd = acthd;
Chris Wilson9107e9d2013-06-10 11:20:20 +01002529 busy_count += busy;
Chris Wilson893eead2010-10-27 14:44:35 +01002530 }
Eric Anholtb9201c12010-01-08 14:25:16 -08002531
Mika Kuoppala92cab732013-05-24 17:16:07 +03002532 for_each_ring(ring, dev_priv, i) {
Chris Wilson9107e9d2013-06-10 11:20:20 +01002533 if (ring->hangcheck.score > FIRE) {
Ben Widawskyacd78c12013-06-13 21:33:33 -07002534 DRM_ERROR("%s on %s\n",
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002535 stuck[i] ? "stuck" : "no progress",
Chris Wilsona43adf02013-06-10 11:20:22 +01002536 ring->name);
2537 rings_hung++;
Mika Kuoppala92cab732013-05-24 17:16:07 +03002538 }
2539 }
2540
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002541 if (rings_hung)
2542 return i915_handle_error(dev, true);
Ben Gamarif65d9422009-09-14 17:48:44 -04002543
Mika Kuoppala05407ff2013-05-30 09:04:29 +03002544 if (busy_count)
2545 /* Reset timer case chip hangs without another request
2546 * being added */
2547 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2548 round_jiffies_up(jiffies +
2549 DRM_I915_HANGCHECK_JIFFIES));
Ben Gamarif65d9422009-09-14 17:48:44 -04002550}
2551
Paulo Zanoni91738a92013-06-05 14:21:51 -03002552static void ibx_irq_preinstall(struct drm_device *dev)
2553{
2554 struct drm_i915_private *dev_priv = dev->dev_private;
2555
2556 if (HAS_PCH_NOP(dev))
2557 return;
2558
2559 /* south display irq */
2560 I915_WRITE(SDEIMR, 0xffffffff);
2561 /*
2562 * SDEIER is also touched by the interrupt handler to work around missed
2563 * PCH interrupts. Hence we can't update it after the interrupt handler
2564 * is enabled - instead we unconditionally enable all PCH interrupt
2565 * sources here, but then only unmask them as needed with SDEIMR.
2566 */
2567 I915_WRITE(SDEIER, 0xffffffff);
2568 POSTING_READ(SDEIER);
2569}
2570
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571/* drm_dma.h hooks
2572 */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002573static void ironlake_irq_preinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002574{
2575 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2576
Jesse Barnes46979952011-04-07 13:53:55 -07002577 atomic_set(&dev_priv->irq_received, 0);
2578
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002579 I915_WRITE(HWSTAM, 0xeffe);
Daniel Vetterbdfcdb62012-01-05 01:05:26 +01002580
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002581 /* XXX hotplug from PCH */
2582
2583 I915_WRITE(DEIMR, 0xffffffff);
2584 I915_WRITE(DEIER, 0x0);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002585 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002586
2587 /* and GT */
2588 I915_WRITE(GTIMR, 0xffffffff);
2589 I915_WRITE(GTIER, 0x0);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002590 POSTING_READ(GTIER);
Zhenyu Wangc6501562009-11-03 18:57:21 +00002591
Paulo Zanoni91738a92013-06-05 14:21:51 -03002592 ibx_irq_preinstall(dev);
Ben Widawsky7d991632013-05-28 19:22:25 -07002593}
2594
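/* Same sequence as the ILK preinstall, but additionally silences the
 * GEN6 PM interrupt registers used for RPS and VEBOX events.
 */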
2595static void ivybridge_irq_preinstall(struct drm_device *dev)
2596{
2597 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2598
2599 atomic_set(&dev_priv->irq_received, 0);
2600
2601 I915_WRITE(HWSTAM, 0xeffe);
2602
2603 /* XXX hotplug from PCH */
2604
2605 I915_WRITE(DEIMR, 0xffffffff);
2606 I915_WRITE(DEIER, 0x0);
2607 POSTING_READ(DEIER);
2608
2609 /* and GT */
2610 I915_WRITE(GTIMR, 0xffffffff);
2611 I915_WRITE(GTIER, 0x0);
2612 POSTING_READ(GTIER);
2613
Ben Widawskyeda63ff2013-05-28 19:22:26 -07002614 /* Power management */
2615 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2616 I915_WRITE(GEN6_PMIER, 0x0);
2617 POSTING_READ(GEN6_PMIER);
2618
Paulo Zanoni91738a92013-06-05 14:21:51 -03002619 ibx_irq_preinstall(dev);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002620}
2621
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002622static void valleyview_irq_preinstall(struct drm_device *dev)
2623{
2624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2625 int pipe;
2626
2627 atomic_set(&dev_priv->irq_received, 0);
2628
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002629 /* VLV magic */
2630 I915_WRITE(VLV_IMR, 0);
2631 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2632 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2633 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2634
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002635 /* and GT */
2636 I915_WRITE(GTIIR, I915_READ(GTIIR));
2637 I915_WRITE(GTIIR, I915_READ(GTIIR));
2638 I915_WRITE(GTIMR, 0xffffffff);
2639 I915_WRITE(GTIER, 0x0);
2640 POSTING_READ(GTIER);
2641
2642 I915_WRITE(DPINVGTT, 0xff);
2643
2644 I915_WRITE(PORT_HOTPLUG_EN, 0);
2645 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2646 for_each_pipe(pipe)
2647 I915_WRITE(PIPESTAT(pipe), 0xffff);
2648 I915_WRITE(VLV_IIR, 0xffffffff);
2649 I915_WRITE(VLV_IMR, 0xffffffff);
2650 I915_WRITE(VLV_IER, 0x0);
2651 POSTING_READ(VLV_IER);
2652}
2653
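/* Unmask the PCH hotplug interrupt for every encoder whose HPD pin is
 * marked enabled; IBX and CPT/LPT use different SDE bit layouts, hence
 * the separate hpd_ibx/hpd_cpt tables. The pulse-duration programming
 * below is common to all known PCH chips.
 */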
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002654static void ibx_hpd_irq_setup(struct drm_device *dev)
Keith Packard7fe0b972011-09-19 13:31:02 -07002655{
2656 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002657 struct drm_mode_config *mode_config = &dev->mode_config;
2658 struct intel_encoder *intel_encoder;
2659 u32 mask = ~I915_READ(SDEIMR);
2660 u32 hotplug;
Keith Packard7fe0b972011-09-19 13:31:02 -07002661
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002662 if (HAS_PCH_IBX(dev)) {
Egbert Eich995e6b32013-04-16 13:36:56 +02002663 mask &= ~SDE_HOTPLUG_MASK;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002664 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002665 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2666 mask |= hpd_ibx[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002667 } else {
Egbert Eich995e6b32013-04-16 13:36:56 +02002668 mask &= ~SDE_HOTPLUG_MASK_CPT;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002669 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
Egbert Eichcd569ae2013-04-16 13:36:57 +02002670 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2671 mask |= hpd_cpt[intel_encoder->hpd_pin];
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002672 }
2673
2674 I915_WRITE(SDEIMR, ~mask);
2675
2676 /*
2677 * Enable digital hotplug on the PCH, and configure the DP short pulse
2678 * duration to 2ms (which is the minimum in the Display Port spec)
2679 *
2680 * This register is the same on all known PCH chips.
2681 */
Keith Packard7fe0b972011-09-19 13:31:02 -07002682 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2683 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2684 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2685 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2686 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2687 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2688}
2689
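/* Unmask the PCH interrupt sources we always want: GMBUS, the AUX
 * channels and the error/underrun bits. CPT/LPT fold the latter into
 * SDE_ERROR_CPT and need SERR_INT cleared first.
 */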
Paulo Zanonid46da432013-02-08 17:35:15 -02002690static void ibx_irq_postinstall(struct drm_device *dev)
2691{
2692 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01002693 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02002694
Daniel Vetter692a04c2013-05-29 21:43:05 +02002695 if (HAS_PCH_NOP(dev))
2696 return;
2697
Paulo Zanoni86642812013-04-12 17:57:57 -03002698 if (HAS_PCH_IBX(dev)) {
2699 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
Paulo Zanonide032bf2013-04-12 17:57:58 -03002700 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
Paulo Zanoni86642812013-04-12 17:57:57 -03002701 } else {
2702 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2703
2704 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2705 }
Ben Widawskyab5c6082013-04-05 13:12:41 -07002706
Paulo Zanonid46da432013-02-08 17:35:15 -02002707 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2708 I915_WRITE(SDEIMR, ~mask);
Paulo Zanonid46da432013-02-08 17:35:15 -02002709}
2710
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002711static int ironlake_irq_postinstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002712{
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002713 unsigned long irqflags;
2714
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002715 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2716 /* enable the kinds of interrupts that are always enabled */
Jesse Barnes013d5aa2010-01-29 11:18:31 -08002717 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
Daniel Vetterce99c252012-12-01 13:53:47 +01002718 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Paulo Zanoni86642812013-04-12 17:57:57 -03002719 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
Paulo Zanonide032bf2013-04-12 17:57:58 -03002720 DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
Ben Widawskycc609d52013-05-28 19:22:29 -07002721 u32 gt_irqs;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002722
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002723 dev_priv->irq_mask = ~display_mask;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002724
2725 /* should always be able to generate an irq */
2726 I915_WRITE(DEIIR, I915_READ(DEIIR));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002727 I915_WRITE(DEIMR, dev_priv->irq_mask);
Daniel Vetter6005ce42013-06-27 13:44:59 +02002728 I915_WRITE(DEIER, display_mask |
2729 DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002730 POSTING_READ(DEIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002731
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002732 dev_priv->gt_irq_mask = ~0;
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002733
2734 I915_WRITE(GTIIR, I915_READ(GTIIR));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002735 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002736
Ben Widawskycc609d52013-05-28 19:22:29 -07002737 gt_irqs = GT_RENDER_USER_INTERRUPT;
2738
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002739 if (IS_GEN6(dev))
Ben Widawskycc609d52013-05-28 19:22:29 -07002740 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002741 else
Ben Widawskycc609d52013-05-28 19:22:29 -07002742 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2743 ILK_BSD_USER_INTERRUPT;
2744
2745 I915_WRITE(GTIER, gt_irqs);
Chris Wilson3143a2b2010-11-16 15:55:10 +00002746 POSTING_READ(GTIER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002747
Paulo Zanonid46da432013-02-08 17:35:15 -02002748 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07002749
Jesse Barnesf97108d2010-01-29 11:27:07 -08002750 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter6005ce42013-06-27 13:44:59 +02002751 /* Enable PCU event interrupts
2752 *
2753 * spinlocking not required here for correctness since interrupt
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002754 * setup is guaranteed to run in single-threaded context. But we
2755 * need it to make the assert_spin_locked happy. */
2756 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002757 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Daniel Vetter4bc9d432013-06-27 13:44:58 +02002758 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Jesse Barnesf97108d2010-01-29 11:27:07 -08002759 }
2760
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002761 return 0;
2762}
2763
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002764static int ivybridge_irq_postinstall(struct drm_device *dev)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002765{
2766 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2767 /* enable kind of interrupts always enabled */
Chris Wilsonb615b572012-05-02 09:52:12 +01002768 u32 display_mask =
2769 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2770 DE_PLANEC_FLIP_DONE_IVB |
2771 DE_PLANEB_FLIP_DONE_IVB |
Daniel Vetterce99c252012-12-01 13:53:47 +01002772 DE_PLANEA_FLIP_DONE_IVB |
Paulo Zanoni86642812013-04-12 17:57:57 -03002773 DE_AUX_CHANNEL_A_IVB |
2774 DE_ERR_INT_IVB;
Ben Widawsky12638c52013-05-28 19:22:31 -07002775 u32 pm_irqs = GEN6_PM_RPS_EVENTS;
Ben Widawskycc609d52013-05-28 19:22:29 -07002776 u32 gt_irqs;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002777
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002778 dev_priv->irq_mask = ~display_mask;
2779
2780 /* should always be able to generate an irq */
Paulo Zanoni86642812013-04-12 17:57:57 -03002781 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002782 I915_WRITE(DEIIR, I915_READ(DEIIR));
2783 I915_WRITE(DEIMR, dev_priv->irq_mask);
Chris Wilsonb615b572012-05-02 09:52:12 +01002784 I915_WRITE(DEIER,
2785 display_mask |
2786 DE_PIPEC_VBLANK_IVB |
2787 DE_PIPEB_VBLANK_IVB |
2788 DE_PIPEA_VBLANK_IVB);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002789 POSTING_READ(DEIER);
2790
Ben Widawskycc609d52013-05-28 19:22:29 -07002791 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002792
2793 I915_WRITE(GTIIR, I915_READ(GTIIR));
2794 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2795
Ben Widawskycc609d52013-05-28 19:22:29 -07002796 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2797 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2798 I915_WRITE(GTIER, gt_irqs);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002799 POSTING_READ(GTIER);
2800
Ben Widawsky12638c52013-05-28 19:22:31 -07002801 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2802 if (HAS_VEBOX(dev))
2803 pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2804 PM_VEBOX_CS_ERROR_INTERRUPT;
2805
2806 /* Our enable/disable rps functions may touch these registers so
2807 * make sure to set a known state for only the non-RPS bits.
2808 * The RMW is extra paranoia since this should be called after being set
2809 * to a known state in preinstall.
2810 */
2811 I915_WRITE(GEN6_PMIMR,
2812 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2813 I915_WRITE(GEN6_PMIER,
2814 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2815 POSTING_READ(GEN6_PMIER);
Ben Widawskyeda63ff2013-05-28 19:22:26 -07002816
Paulo Zanonid46da432013-02-08 17:35:15 -02002817 ibx_irq_postinstall(dev);
Keith Packard7fe0b972011-09-19 13:31:02 -07002818
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002819 return 0;
2820}
2821
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002822static int valleyview_irq_postinstall(struct drm_device *dev)
2823{
2824 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Ben Widawskycc609d52013-05-28 19:22:29 -07002825 u32 gt_irqs;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002826 u32 enable_mask;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002827 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002828
2829 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002830 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2831 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2832 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002833 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2834
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002835 /*
2836 * Leave vblank interrupts masked initially. enable/disable will
2837 * toggle them based on usage.
2838 */
2839 dev_priv->irq_mask = (~enable_mask) |
2840 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2841 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002842
Daniel Vetter20afbda2012-12-11 14:05:07 +01002843 I915_WRITE(PORT_HOTPLUG_EN, 0);
2844 POSTING_READ(PORT_HOTPLUG_EN);
2845
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002846 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2847 I915_WRITE(VLV_IER, enable_mask);
2848 I915_WRITE(VLV_IIR, 0xffffffff);
2849 I915_WRITE(PIPESTAT(0), 0xffff);
2850 I915_WRITE(PIPESTAT(1), 0xffff);
2851 POSTING_READ(VLV_IER);
2852
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002853 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01002854 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002855 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2856
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002857 I915_WRITE(VLV_IIR, 0xffffffff);
2858 I915_WRITE(VLV_IIR, 0xffffffff);
2859
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002860 I915_WRITE(GTIIR, I915_READ(GTIIR));
Jesse Barnes31acc7f2012-06-20 10:53:11 -07002861 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
Jesse Barnes3bcedbe2012-09-19 13:29:01 -07002862
Ben Widawskycc609d52013-05-28 19:22:29 -07002863 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2864 GT_BLT_USER_INTERRUPT;
2865 I915_WRITE(GTIER, gt_irqs);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002866 POSTING_READ(GTIER);
2867
2868 /* ack & enable invalid PTE error interrupts */
2869#if 0 /* FIXME: add support to irq handler for checking these bits */
2870 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2871 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2872#endif
2873
2874 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Daniel Vetter20afbda2012-12-11 14:05:07 +01002875
2876 return 0;
2877}
2878
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002879static void valleyview_irq_uninstall(struct drm_device *dev)
2880{
2881 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2882 int pipe;
2883
2884 if (!dev_priv)
2885 return;
2886
Egbert Eichac4c16c2013-04-16 13:36:58 +02002887 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2888
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002889 for_each_pipe(pipe)
2890 I915_WRITE(PIPESTAT(pipe), 0xffff);
2891
2892 I915_WRITE(HWSTAM, 0xffffffff);
2893 I915_WRITE(PORT_HOTPLUG_EN, 0);
2894 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2895 for_each_pipe(pipe)
2896 I915_WRITE(PIPESTAT(pipe), 0xffff);
2897 I915_WRITE(VLV_IIR, 0xffffffff);
2898 I915_WRITE(VLV_IMR, 0xffffffff);
2899 I915_WRITE(VLV_IER, 0x0);
2900 POSTING_READ(VLV_IER);
2901}
2902
Jesse Barnesf71d4af2011-06-28 13:00:41 -07002903static void ironlake_irq_uninstall(struct drm_device *dev)
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002904{
2905 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Jesse Barnes46979952011-04-07 13:53:55 -07002906
2907 if (!dev_priv)
2908 return;
2909
Egbert Eichac4c16c2013-04-16 13:36:58 +02002910 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2911
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002912 I915_WRITE(HWSTAM, 0xffffffff);
2913
2914 I915_WRITE(DEIMR, 0xffffffff);
2915 I915_WRITE(DEIER, 0x0);
2916 I915_WRITE(DEIIR, I915_READ(DEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002917 if (IS_GEN7(dev))
2918 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002919
2920 I915_WRITE(GTIMR, 0xffffffff);
2921 I915_WRITE(GTIER, 0x0);
2922 I915_WRITE(GTIIR, I915_READ(GTIIR));
Keith Packard192aac1f2011-09-20 10:12:44 -07002923
Ben Widawskyab5c6082013-04-05 13:12:41 -07002924 if (HAS_PCH_NOP(dev))
2925 return;
2926
Keith Packard192aac1f2011-09-20 10:12:44 -07002927 I915_WRITE(SDEIMR, 0xffffffff);
2928 I915_WRITE(SDEIER, 0x0);
2929 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
Paulo Zanoni86642812013-04-12 17:57:57 -03002930 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2931 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002932}
2933
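/* gen2 exposes 16-bit IMR/IER/IIR registers, hence the I915_WRITE16/
 * I915_READ16 accessors throughout the i8xx paths below.
 */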
Chris Wilsonc2798b12012-04-22 21:13:57 +01002934static void i8xx_irq_preinstall(struct drm_device * dev)
2935{
2936 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2937 int pipe;
2938
2939 atomic_set(&dev_priv->irq_received, 0);
2940
2941 for_each_pipe(pipe)
2942 I915_WRITE(PIPESTAT(pipe), 0);
2943 I915_WRITE16(IMR, 0xffff);
2944 I915_WRITE16(IER, 0x0);
2945 POSTING_READ16(IER);
2946}
2947
2948static int i8xx_irq_postinstall(struct drm_device *dev)
2949{
2950 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2951
Chris Wilsonc2798b12012-04-22 21:13:57 +01002952 I915_WRITE16(EMR,
2953 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2954
2955 /* Unmask the interrupts that we always want on. */
2956 dev_priv->irq_mask =
2957 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2958 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2959 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2960 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2961 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2962 I915_WRITE16(IMR, dev_priv->irq_mask);
2963
2964 I915_WRITE16(IER,
2965 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2966 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2967 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2968 I915_USER_INTERRUPT);
2969 POSTING_READ16(IER);
2970
2971 return 0;
2972}
2973
Ville Syrjälä90a72f82013-02-19 23:16:44 +02002974/*
2975 * Returns true when a page flip has completed.
2976 */
2977static bool i8xx_handle_vblank(struct drm_device *dev,
2978 int pipe, u16 iir)
2979{
2980 drm_i915_private_t *dev_priv = dev->dev_private;
2981 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2982
2983 if (!drm_handle_vblank(dev, pipe))
2984 return false;
2985
2986 if ((iir & flip_pending) == 0)
2987 return false;
2988
2989 intel_prepare_page_flip(dev, pipe);
2990
2991 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2992 * to '0' on the following vblank, i.e. IIR has the PendingFlip
2993 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2994 * the flip is completed (no longer pending). Since this doesn't raise
2995 * an interrupt per se, we watch for the change at vblank.
2996 */
2997 if (I915_READ16(ISR) & flip_pending)
2998 return false;
2999
3000 intel_finish_page_flip(dev, pipe);
3001
3002 return true;
3003}
3004
Daniel Vetterff1f5252012-10-02 15:10:55 +02003005static irqreturn_t i8xx_irq_handler(int irq, void *arg)
Chris Wilsonc2798b12012-04-22 21:13:57 +01003006{
3007 struct drm_device *dev = (struct drm_device *) arg;
3008 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003009 u16 iir, new_iir;
3010 u32 pipe_stats[2];
3011 unsigned long irqflags;
3012 int irq_received;
3013 int pipe;
3014 u16 flip_mask =
3015 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3016 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3017
3018 atomic_inc(&dev_priv->irq_received);
3019
3020 iir = I915_READ16(IIR);
3021 if (iir == 0)
3022 return IRQ_NONE;
3023
3024 while (iir & ~flip_mask) {
3025 /* Can't rely on pipestat interrupt bit in iir as it might
3026 * have been cleared after the pipestat interrupt was received.
3027 * It doesn't set the bit in iir again, but it still produces
3028 * interrupts (for non-MSI).
3029 */
3030 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3031 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3032 i915_handle_error(dev, false);
3033
3034 for_each_pipe(pipe) {
3035 int reg = PIPESTAT(pipe);
3036 pipe_stats[pipe] = I915_READ(reg);
3037
3038 /*
3039 * Clear the PIPE*STAT regs before the IIR
3040 */
3041 if (pipe_stats[pipe] & 0x8000ffff) {
3042 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3043 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3044 pipe_name(pipe));
3045 I915_WRITE(reg, pipe_stats[pipe]);
3046 irq_received = 1;
3047 }
3048 }
3049 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3050
3051 I915_WRITE16(IIR, iir & ~flip_mask);
3052 new_iir = I915_READ16(IIR); /* Flush posted writes */
3053
Daniel Vetterd05c6172012-04-26 23:28:09 +02003054 i915_update_dri1_breadcrumb(dev);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003055
3056 if (iir & I915_USER_INTERRUPT)
3057 notify_ring(dev, &dev_priv->ring[RCS]);
3058
3059 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003060 i8xx_handle_vblank(dev, 0, iir))
3061 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003062
3063 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003064 i8xx_handle_vblank(dev, 1, iir))
3065 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
Chris Wilsonc2798b12012-04-22 21:13:57 +01003066
3067 iir = new_iir;
3068 }
3069
3070 return IRQ_HANDLED;
3071}
3072
3073static void i8xx_irq_uninstall(struct drm_device * dev)
3074{
3075 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3076 int pipe;
3077
Chris Wilsonc2798b12012-04-22 21:13:57 +01003078 for_each_pipe(pipe) {
3079 /* Clear enable bits; then clear status bits */
3080 I915_WRITE(PIPESTAT(pipe), 0);
3081 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3082 }
3083 I915_WRITE16(IMR, 0xffff);
3084 I915_WRITE16(IER, 0x0);
3085 I915_WRITE16(IIR, I915_READ16(IIR));
3086}
3087
Chris Wilsona266c7d2012-04-24 22:59:44 +01003088static void i915_irq_preinstall(struct drm_device * dev)
3089{
3090 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3091 int pipe;
3092
3093 atomic_set(&dev_priv->irq_received, 0);
3094
3095 if (I915_HAS_HOTPLUG(dev)) {
3096 I915_WRITE(PORT_HOTPLUG_EN, 0);
3097 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3098 }
3099
Chris Wilson00d98eb2012-04-24 22:59:48 +01003100 I915_WRITE16(HWSTAM, 0xeffe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003101 for_each_pipe(pipe)
3102 I915_WRITE(PIPESTAT(pipe), 0);
3103 I915_WRITE(IMR, 0xffffffff);
3104 I915_WRITE(IER, 0x0);
3105 POSTING_READ(IER);
3106}
3107
3108static int i915_irq_postinstall(struct drm_device *dev)
3109{
3110 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson38bde182012-04-24 22:59:50 +01003111 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003112
Chris Wilson38bde182012-04-24 22:59:50 +01003113 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3114
3115 /* Unmask the interrupts that we always want on. */
3116 dev_priv->irq_mask =
3117 ~(I915_ASLE_INTERRUPT |
3118 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3119 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3120 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3121 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3122 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3123
3124 enable_mask =
3125 I915_ASLE_INTERRUPT |
3126 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3127 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3128 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3129 I915_USER_INTERRUPT;
3130
Chris Wilsona266c7d2012-04-24 22:59:44 +01003131 if (I915_HAS_HOTPLUG(dev)) {
Daniel Vetter20afbda2012-12-11 14:05:07 +01003132 I915_WRITE(PORT_HOTPLUG_EN, 0);
3133 POSTING_READ(PORT_HOTPLUG_EN);
3134
Chris Wilsona266c7d2012-04-24 22:59:44 +01003135 /* Enable in IER... */
3136 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3137 /* and unmask in IMR */
3138 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3139 }
3140
Chris Wilsona266c7d2012-04-24 22:59:44 +01003141 I915_WRITE(IMR, dev_priv->irq_mask);
3142 I915_WRITE(IER, enable_mask);
3143 POSTING_READ(IER);
3144
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003145 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003146
3147 return 0;
3148}
3149
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003150/*
3151 * Returns true when a page flip has completed.
3152 */
3153static bool i915_handle_vblank(struct drm_device *dev,
3154 int plane, int pipe, u32 iir)
3155{
3156 drm_i915_private_t *dev_priv = dev->dev_private;
3157 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3158
3159 if (!drm_handle_vblank(dev, pipe))
3160 return false;
3161
3162 if ((iir & flip_pending) == 0)
3163 return false;
3164
3165 intel_prepare_page_flip(dev, plane);
3166
3167 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3168 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3169 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3170 * the flip is completed (no longer pending). Since this doesn't raise
3171 * an interrupt per se, we watch for the change at vblank.
3172 */
3173 if (I915_READ(ISR) & flip_pending)
3174 return false;
3175
3176 intel_finish_page_flip(dev, pipe);
3177
3178 return true;
3179}
3180
Daniel Vetterff1f5252012-10-02 15:10:55 +02003181static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003182{
3183 struct drm_device *dev = (struct drm_device *) arg;
3184 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilson8291ee92012-04-24 22:59:47 +01003185 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003186 unsigned long irqflags;
Chris Wilson38bde182012-04-24 22:59:50 +01003187 u32 flip_mask =
3188 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3189 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilson38bde182012-04-24 22:59:50 +01003190 int pipe, ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003191
3192 atomic_inc(&dev_priv->irq_received);
3193
3194 iir = I915_READ(IIR);
Chris Wilson38bde182012-04-24 22:59:50 +01003195 do {
3196 bool irq_received = (iir & ~flip_mask) != 0;
Chris Wilson8291ee92012-04-24 22:59:47 +01003197 bool blc_event = false;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003198
3199 /* Can't rely on pipestat interrupt bit in iir as it might
3200 * have been cleared after the pipestat interrupt was received.
3201 * It doesn't set the bit in iir again, but it still produces
3202 * interrupts (for non-MSI).
3203 */
3204 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3205 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3206 i915_handle_error(dev, false);
3207
3208 for_each_pipe(pipe) {
3209 int reg = PIPESTAT(pipe);
3210 pipe_stats[pipe] = I915_READ(reg);
3211
Chris Wilson38bde182012-04-24 22:59:50 +01003212 /* Clear the PIPE*STAT regs before the IIR */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003213 if (pipe_stats[pipe] & 0x8000ffff) {
3214 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3215 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3216 pipe_name(pipe));
3217 I915_WRITE(reg, pipe_stats[pipe]);
Chris Wilson38bde182012-04-24 22:59:50 +01003218 irq_received = true;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003219 }
3220 }
3221 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3222
3223 if (!irq_received)
3224 break;
3225
Chris Wilsona266c7d2012-04-24 22:59:44 +01003226 /* Consume port. Then clear IIR or we'll miss events */
3227 if ((I915_HAS_HOTPLUG(dev)) &&
3228 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3229 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02003230 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003231
3232 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3233 hotplug_status);
Egbert Eichb543fb02013-04-16 13:36:54 +02003234 if (hotplug_trigger) {
Egbert Eichcd569ae2013-04-16 13:36:57 +02003235 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
3236 i915_hpd_irq_setup(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003237 queue_work(dev_priv->wq,
3238 &dev_priv->hotplug_work);
Egbert Eichb543fb02013-04-16 13:36:54 +02003239 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003240 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
Chris Wilson38bde182012-04-24 22:59:50 +01003241 POSTING_READ(PORT_HOTPLUG_STAT);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003242 }
3243
Chris Wilson38bde182012-04-24 22:59:50 +01003244 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003245 new_iir = I915_READ(IIR); /* Flush posted writes */
3246
Chris Wilsona266c7d2012-04-24 22:59:44 +01003247 if (iir & I915_USER_INTERRUPT)
3248 notify_ring(dev, &dev_priv->ring[RCS]);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003249
Chris Wilsona266c7d2012-04-24 22:59:44 +01003250 for_each_pipe(pipe) {
Chris Wilson38bde182012-04-24 22:59:50 +01003251 int plane = pipe;
3252 if (IS_MOBILE(dev))
3253 plane = !plane;
Ville Syrjälä5e2032d2013-02-19 15:16:38 +02003254
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003255 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3256 i915_handle_vblank(dev, plane, pipe, iir))
3257 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003258
3259 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3260 blc_event = true;
3261 }
3262
Chris Wilsona266c7d2012-04-24 22:59:44 +01003263 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3264 intel_opregion_asle_intr(dev);
3265
3266 /* With MSI, interrupts are only generated when iir
3267 * transitions from zero to nonzero. If another bit got
3268 * set while we were handling the existing iir bits, then
3269 * we would never get another interrupt.
3270 *
3271 * This is fine on non-MSI as well, as if we hit this path
3272 * we avoid exiting the interrupt handler only to generate
3273 * another one.
3274 *
3275 * Note that for MSI this could cause a stray interrupt report
3276 * if an interrupt landed in the time between writing IIR and
3277 * the posting read. This should be rare enough to never
3278 * trigger the 99% of 100,000 interrupts test for disabling
3279 * stray interrupts.
3280 */
Chris Wilson38bde182012-04-24 22:59:50 +01003281 ret = IRQ_HANDLED;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003282 iir = new_iir;
Chris Wilson38bde182012-04-24 22:59:50 +01003283 } while (iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003284
Daniel Vetterd05c6172012-04-26 23:28:09 +02003285 i915_update_dri1_breadcrumb(dev);
Chris Wilson8291ee92012-04-24 22:59:47 +01003286
Chris Wilsona266c7d2012-04-24 22:59:44 +01003287 return ret;
3288}
3289
3290static void i915_irq_uninstall(struct drm_device * dev)
3291{
3292 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3293 int pipe;
3294
Egbert Eichac4c16c2013-04-16 13:36:58 +02003295 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3296
Chris Wilsona266c7d2012-04-24 22:59:44 +01003297 if (I915_HAS_HOTPLUG(dev)) {
3298 I915_WRITE(PORT_HOTPLUG_EN, 0);
3299 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3300 }
3301
Chris Wilson00d98eb2012-04-24 22:59:48 +01003302 I915_WRITE16(HWSTAM, 0xffff);
Chris Wilson55b39752012-04-24 22:59:49 +01003303 for_each_pipe(pipe) {
3304 /* Clear enable bits; then clear status bits */
Chris Wilsona266c7d2012-04-24 22:59:44 +01003305 I915_WRITE(PIPESTAT(pipe), 0);
Chris Wilson55b39752012-04-24 22:59:49 +01003306 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3307 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003308 I915_WRITE(IMR, 0xffffffff);
3309 I915_WRITE(IER, 0x0);
3310
Chris Wilsona266c7d2012-04-24 22:59:44 +01003311 I915_WRITE(IIR, I915_READ(IIR));
3312}
3313
3314static void i965_irq_preinstall(struct drm_device * dev)
3315{
3316 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3317 int pipe;
3318
3319 atomic_set(&dev_priv->irq_received, 0);
3320
Chris Wilsonadca4732012-05-11 18:01:31 +01003321 I915_WRITE(PORT_HOTPLUG_EN, 0);
3322 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003323
3324 I915_WRITE(HWSTAM, 0xeffe);
3325 for_each_pipe(pipe)
3326 I915_WRITE(PIPESTAT(pipe), 0);
3327 I915_WRITE(IMR, 0xffffffff);
3328 I915_WRITE(IER, 0x0);
3329 POSTING_READ(IER);
3330}
3331
3332static int i965_irq_postinstall(struct drm_device *dev)
3333{
3334 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003335 u32 enable_mask;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003336 u32 error_mask;
3337
Chris Wilsona266c7d2012-04-24 22:59:44 +01003338 /* Unmask the interrupts that we always want on. */
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003339 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
Chris Wilsonadca4732012-05-11 18:01:31 +01003340 I915_DISPLAY_PORT_INTERRUPT |
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003341 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3342 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3343 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3344 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3345 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3346
3347 enable_mask = ~dev_priv->irq_mask;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003348 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3349 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
Chris Wilsonbbba0a92012-04-24 22:59:51 +01003350 enable_mask |= I915_USER_INTERRUPT;
3351
3352 if (IS_G4X(dev))
3353 enable_mask |= I915_BSD_USER_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003354
Daniel Vetter515ac2b2012-12-01 13:53:44 +01003355 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003356
Chris Wilsona266c7d2012-04-24 22:59:44 +01003357 /*
3358 * Enable some error detection, note the instruction error mask
3359 * bit is reserved, so we leave it masked.
3360 */
3361 if (IS_G4X(dev)) {
3362 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3363 GM45_ERROR_MEM_PRIV |
3364 GM45_ERROR_CP_PRIV |
3365 I915_ERROR_MEMORY_REFRESH);
3366 } else {
3367 error_mask = ~(I915_ERROR_PAGE_TABLE |
3368 I915_ERROR_MEMORY_REFRESH);
3369 }
3370 I915_WRITE(EMR, error_mask);
3371
3372 I915_WRITE(IMR, dev_priv->irq_mask);
3373 I915_WRITE(IER, enable_mask);
3374 POSTING_READ(IER);
3375
Daniel Vetter20afbda2012-12-11 14:05:07 +01003376 I915_WRITE(PORT_HOTPLUG_EN, 0);
3377 POSTING_READ(PORT_HOTPLUG_EN);
3378
Jani Nikulaf49e38d2013-04-29 13:02:54 +03003379 i915_enable_asle_pipestat(dev);
Daniel Vetter20afbda2012-12-11 14:05:07 +01003380
3381 return 0;
3382}
3383
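/* Program PORT_HOTPLUG_EN from the per-pin HPD state. This hook is
 * shared by the i915/i965 handlers and by VLV, which all use the same
 * hotplug register layout.
 */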
Egbert Eichbac56d52013-02-25 12:06:51 -05003384static void i915_hpd_irq_setup(struct drm_device *dev)
Daniel Vetter20afbda2012-12-11 14:05:07 +01003385{
3386 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Egbert Eiche5868a32013-02-28 04:17:12 -05003387 struct drm_mode_config *mode_config = &dev->mode_config;
Egbert Eichcd569ae2013-04-16 13:36:57 +02003388 struct intel_encoder *intel_encoder;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003389 u32 hotplug_en;
3390
Egbert Eichbac56d52013-02-25 12:06:51 -05003391 if (I915_HAS_HOTPLUG(dev)) {
3392 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3393 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3394 /* Note HDMI and DP share hotplug bits */
Egbert Eiche5868a32013-02-28 04:17:12 -05003395 /* enable bits are the same for all generations */
Egbert Eichcd569ae2013-04-16 13:36:57 +02003396 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3397 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3398 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
Egbert Eichbac56d52013-02-25 12:06:51 -05003399 /* Programming the CRT detection parameters tends
3400 to generate a spurious hotplug event about three
3401 seconds later. So just do it once.
3402 */
3403 if (IS_G4X(dev))
3404 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
Daniel Vetter85fc95b2013-03-27 15:47:11 +01003405 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
Egbert Eichbac56d52013-02-25 12:06:51 -05003406 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003407
Egbert Eichbac56d52013-02-25 12:06:51 -05003408 /* Ignore TV since it's buggy */
3409 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3410 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003411}
3412
Daniel Vetterff1f5252012-10-02 15:10:55 +02003413static irqreturn_t i965_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01003414{
3415 struct drm_device *dev = (struct drm_device *) arg;
3416 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003417 u32 iir, new_iir;
3418 u32 pipe_stats[I915_MAX_PIPES];
Chris Wilsona266c7d2012-04-24 22:59:44 +01003419 unsigned long irqflags;
3420 int irq_received;
3421 int ret = IRQ_NONE, pipe;
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003422 u32 flip_mask =
3423 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3424 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003425
3426 atomic_inc(&dev_priv->irq_received);
3427
3428 iir = I915_READ(IIR);
3429
Chris Wilsona266c7d2012-04-24 22:59:44 +01003430 for (;;) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01003431 bool blc_event = false;
3432
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003433 irq_received = (iir & ~flip_mask) != 0;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003434
3435 /* Can't rely on pipestat interrupt bit in iir as it might
3436 * have been cleared after the pipestat interrupt was received.
3437 * It doesn't set the bit in iir again, but it still produces
3438 * interrupts (for non-MSI).
3439 */
3440 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3441 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3442 i915_handle_error(dev, false);
3443
3444 for_each_pipe(pipe) {
3445 int reg = PIPESTAT(pipe);
3446 pipe_stats[pipe] = I915_READ(reg);
3447
3448 /*
3449 * Clear the PIPE*STAT regs before the IIR
3450 */
3451 if (pipe_stats[pipe] & 0x8000ffff) {
3452 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3453 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3454 pipe_name(pipe));
3455 I915_WRITE(reg, pipe_stats[pipe]);
3456 irq_received = 1;
3457 }
3458 }
3459 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3460
3461 if (!irq_received)
3462 break;
3463
3464 ret = IRQ_HANDLED;
3465
3466 /* Consume port. Then clear IIR or we'll miss events */
Chris Wilsonadca4732012-05-11 18:01:31 +01003467 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003468 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Egbert Eichb543fb02013-04-16 13:36:54 +02003469 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3470 HOTPLUG_INT_STATUS_G4X :
Daniel Vetter4f7fd702013-06-24 21:33:28 +02003471 HOTPLUG_INT_STATUS_I915);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003472
3473 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3474 hotplug_status);
Egbert Eichb543fb02013-04-16 13:36:54 +02003475 if (hotplug_trigger) {
Egbert Eichcd569ae2013-04-16 13:36:57 +02003476 if (hotplug_irq_storm_detect(dev, hotplug_trigger,
Daniel Vetter4f7fd702013-06-24 21:33:28 +02003477 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
Egbert Eichcd569ae2013-04-16 13:36:57 +02003478 i915_hpd_irq_setup(dev);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003479 queue_work(dev_priv->wq,
3480 &dev_priv->hotplug_work);
Egbert Eichb543fb02013-04-16 13:36:54 +02003481 }
Chris Wilsona266c7d2012-04-24 22:59:44 +01003482 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3483 I915_READ(PORT_HOTPLUG_STAT);
3484 }
3485
Ville Syrjälä21ad8332013-02-19 15:16:39 +02003486 I915_WRITE(IIR, iir & ~flip_mask);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003487 new_iir = I915_READ(IIR); /* Flush posted writes */
3488
Chris Wilsona266c7d2012-04-24 22:59:44 +01003489 if (iir & I915_USER_INTERRUPT)
3490 notify_ring(dev, &dev_priv->ring[RCS]);
3491 if (iir & I915_BSD_USER_INTERRUPT)
3492 notify_ring(dev, &dev_priv->ring[VCS]);
3493
Chris Wilsona266c7d2012-04-24 22:59:44 +01003494 for_each_pipe(pipe) {
Chris Wilson2c8ba292012-04-24 22:59:46 +01003495 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
Ville Syrjälä90a72f82013-02-19 23:16:44 +02003496 i915_handle_vblank(dev, pipe, pipe, iir))
3497 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Chris Wilsona266c7d2012-04-24 22:59:44 +01003498
3499 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3500 blc_event = true;
3501 }
3502
3503
3504 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3505 intel_opregion_asle_intr(dev);
3506
Daniel Vetter515ac2b2012-12-01 13:53:44 +01003507 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3508 gmbus_irq_handler(dev);
3509
Chris Wilsona266c7d2012-04-24 22:59:44 +01003510 /* With MSI, interrupts are only generated when iir
3511 * transitions from zero to nonzero. If another bit got
3512 * set while we were handling the existing iir bits, then
3513 * we would never get another interrupt.
3514 *
3515 * This is fine on non-MSI as well, as if we hit this path
3516 * we avoid exiting the interrupt handler only to generate
3517 * another one.
3518 *
3519 * Note that for MSI this could cause a stray interrupt report
3520 * if an interrupt landed in the time between writing IIR and
3521 * the posting read. This should be rare enough to never
3522 * trigger the 99% of 100,000 interrupts test for disabling
3523 * stray interrupts.
3524 */
3525 iir = new_iir;
3526 }
3527
Daniel Vetterd05c6172012-04-26 23:28:09 +02003528 i915_update_dri1_breadcrumb(dev);
Chris Wilson2c8ba292012-04-24 22:59:46 +01003529
Chris Wilsona266c7d2012-04-24 22:59:44 +01003530 return ret;
3531}
3532
3533static void i965_irq_uninstall(struct drm_device * dev)
3534{
3535 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3536 int pipe;
3537
3538 if (!dev_priv)
3539 return;
3540
Egbert Eichac4c16c2013-04-16 13:36:58 +02003541 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3542
Chris Wilsonadca4732012-05-11 18:01:31 +01003543 I915_WRITE(PORT_HOTPLUG_EN, 0);
3544 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
Chris Wilsona266c7d2012-04-24 22:59:44 +01003545
3546 I915_WRITE(HWSTAM, 0xffffffff);
3547 for_each_pipe(pipe)
3548 I915_WRITE(PIPESTAT(pipe), 0);
3549 I915_WRITE(IMR, 0xffffffff);
3550 I915_WRITE(IER, 0x0);
3551
3552 for_each_pipe(pipe)
3553 I915_WRITE(PIPESTAT(pipe),
3554 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3555 I915_WRITE(IIR, I915_READ(IIR));
3556}
3557
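/* Timer callback that re-arms HPD pins previously disabled by the IRQ
 * storm detection: each affected connector gets its polling mode
 * restored before the hardware is reprogrammed, all under irq_lock.
 */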
Egbert Eichac4c16c2013-04-16 13:36:58 +02003558static void i915_reenable_hotplug_timer_func(unsigned long data)
3559{
3560 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3561 struct drm_device *dev = dev_priv->dev;
3562 struct drm_mode_config *mode_config = &dev->mode_config;
3563 unsigned long irqflags;
3564 int i;
3565
3566 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3567 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3568 struct drm_connector *connector;
3569
3570 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3571 continue;
3572
3573 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3574
3575 list_for_each_entry(connector, &mode_config->connector_list, head) {
3576 struct intel_connector *intel_connector = to_intel_connector(connector);
3577
3578 if (intel_connector->encoder->hpd_pin == i) {
3579 if (connector->polled != intel_connector->polled)
3580 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3581 drm_get_connector_name(connector));
3582 connector->polled = intel_connector->polled;
3583 if (!connector->polled)
3584 connector->polled = DRM_CONNECTOR_POLL_HPD;
3585 }
3586 }
3587 }
3588 if (dev_priv->display.hpd_irq_setup)
3589 dev_priv->display.hpd_irq_setup(dev);
3590 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3591}
3592
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003593void intel_irq_init(struct drm_device *dev)
3594{
Chris Wilson8b2e3262012-04-24 22:59:41 +01003595 struct drm_i915_private *dev_priv = dev->dev_private;
3596
3597 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Daniel Vetter99584db2012-11-14 17:14:04 +01003598 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003599 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01003600 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Chris Wilson8b2e3262012-04-24 22:59:41 +01003601
Daniel Vetter99584db2012-11-14 17:14:04 +01003602 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3603 i915_hangcheck_elapsed,
Daniel Vetter61bac782012-12-01 21:03:21 +01003604 (unsigned long) dev);
Egbert Eichac4c16c2013-04-16 13:36:58 +02003605 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3606 (unsigned long) dev_priv);
Daniel Vetter61bac782012-12-01 21:03:21 +01003607
Tomas Janousek97a19a22012-12-08 13:48:13 +01003608 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01003609
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003610 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3611 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
Eugeni Dodonov7d4e1462012-05-09 15:37:09 -03003612 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003613 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3614 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3615 }
3616
Keith Packardc3613de2011-08-12 17:05:54 -07003617 if (drm_core_check_feature(dev, DRIVER_MODESET))
3618 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3619 else
3620 dev->driver->get_vblank_timestamp = NULL;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003621 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3622
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003623 if (IS_VALLEYVIEW(dev)) {
3624 dev->driver->irq_handler = valleyview_irq_handler;
3625 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3626 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3627 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3628 dev->driver->enable_vblank = valleyview_enable_vblank;
3629 dev->driver->disable_vblank = valleyview_disable_vblank;
Egbert Eichfa00abe2013-02-25 12:06:48 -05003630 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Daniel Vetter4a06e202012-12-01 13:53:40 +01003631 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
Ben Widawsky7d991632013-05-28 19:22:25 -07003632 /* Share uninstall handlers with ILK/SNB */
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003633 dev->driver->irq_handler = ivybridge_irq_handler;
Ben Widawsky7d991632013-05-28 19:22:25 -07003634 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003635 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3636 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3637 dev->driver->enable_vblank = ivybridge_enable_vblank;
3638 dev->driver->disable_vblank = ivybridge_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003639 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003640 } else if (HAS_PCH_SPLIT(dev)) {
3641 dev->driver->irq_handler = ironlake_irq_handler;
3642 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3643 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3644 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3645 dev->driver->enable_vblank = ironlake_enable_vblank;
3646 dev->driver->disable_vblank = ironlake_disable_vblank;
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003647 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003648 } else {
Chris Wilsonc2798b12012-04-22 21:13:57 +01003649 if (INTEL_INFO(dev)->gen == 2) {
3650 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3651 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3652 dev->driver->irq_handler = i8xx_irq_handler;
3653 dev->driver->irq_uninstall = i8xx_irq_uninstall;
Chris Wilsona266c7d2012-04-24 22:59:44 +01003654 } else if (INTEL_INFO(dev)->gen == 3) {
3655 dev->driver->irq_preinstall = i915_irq_preinstall;
3656 dev->driver->irq_postinstall = i915_irq_postinstall;
3657 dev->driver->irq_uninstall = i915_irq_uninstall;
3658 dev->driver->irq_handler = i915_irq_handler;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003659 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003660 } else {
Chris Wilsona266c7d2012-04-24 22:59:44 +01003661 dev->driver->irq_preinstall = i965_irq_preinstall;
3662 dev->driver->irq_postinstall = i965_irq_postinstall;
3663 dev->driver->irq_uninstall = i965_irq_uninstall;
3664 dev->driver->irq_handler = i965_irq_handler;
Egbert Eichbac56d52013-02-25 12:06:51 -05003665 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
Chris Wilsonc2798b12012-04-22 21:13:57 +01003666 }
Jesse Barnesf71d4af2011-06-28 13:00:41 -07003667 dev->driver->enable_vblank = i915_enable_vblank;
3668 dev->driver->disable_vblank = i915_disable_vblank;
3669 }
3670}
Daniel Vetter20afbda2012-12-11 14:05:07 +01003671
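/* Reset the per-pin HPD statistics, prefer hotplug-based detection for
 * every connector that supports it, and program the hardware through
 * the platform's hpd_irq_setup hook.
 */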
3672void intel_hpd_init(struct drm_device *dev)
3673{
3674 struct drm_i915_private *dev_priv = dev->dev_private;
Egbert Eich821450c2013-04-16 13:36:55 +02003675 struct drm_mode_config *mode_config = &dev->mode_config;
3676 struct drm_connector *connector;
3677 int i;
Daniel Vetter20afbda2012-12-11 14:05:07 +01003678
Egbert Eich821450c2013-04-16 13:36:55 +02003679 for (i = 1; i < HPD_NUM_PINS; i++) {
3680 dev_priv->hpd_stats[i].hpd_cnt = 0;
3681 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3682 }
3683 list_for_each_entry(connector, &mode_config->connector_list, head) {
3684 struct intel_connector *intel_connector = to_intel_connector(connector);
3685 connector->polled = intel_connector->polled;
3686 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3687 connector->polled = DRM_CONNECTOR_POLL_HPD;
3688 }
Daniel Vetter20afbda2012-12-11 14:05:07 +01003689 if (dev_priv->display.hpd_irq_setup)
3690 dev_priv->display.hpd_irq_setup(dev);
3691}