/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

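/*
 * Hotplug interrupt lookup tables, indexed by enum hpd_pin: each entry maps
 * an HPD pin to the platform-specific hotplug enable/status bit. hpd_ibx and
 * hpd_cpt cover the IBX and CPT/PPT PCH south display engine, hpd_mask_i915
 * holds the interrupt enable bits, and hpd_status_gen4/hpd_status_i915 the
 * corresponding status bits for the pre-PCH hotplug registers.
 */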
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display engine interrupts (DEIMR) */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

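/*
 * On IVB/HSW there is only a single DE_ERR_INT_IVB mask bit shared by all
 * pipes, so the error interrupt may only be enabled when no pipe has FIFO
 * underrun reporting disabled.
 */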
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

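/*
 * Likewise, CPT/PPT has a single SERR_INT mask bit (SDE_ERROR_CPT) shared by
 * all PCH transcoders, so south error interrupts may only be enabled when no
 * transcoder has PCH FIFO underrun reporting disabled.
 */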
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable CPU FIFO underrun reporting for a
 * specific pipe. Note that on some gens (e.g. IVB, HSW), disabling FIFO
 * underrun reporting for one pipe may also disable all the other CPU error
 * interrupts for the other pipes, because there is just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable PCH FIFO underrun reporting for a
 * specific PCH transcoder. Note that on some PCHs (e.g. CPT/PPT), disabling
 * FIFO underrun reporting for one transcoder may also disable all the other
 * PCH error interrupts for the other transcoders, because there is just one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

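/*
 * i915_enable_pipestat/i915_disable_pipestat set and clear interrupt enable
 * bits in the PIPESTAT register for the given pipe. The low 16 status bits
 * are write-one-to-clear, so the enable path also writes (mask >> 16) to
 * clear any status that is already pending. Callers must hold irq_lock.
 */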
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

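/*
 * Report the current scanout position for @pipe. On gen4+ only the vertical
 * position is available (from PIPEDSL); on older gens the frame pixel
 * counter is decomposed into vertical and horizontal components. The return
 * value is a bitmask of DRM_SCANOUTPOS_* flags.
 */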
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

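/*
 * Re-run detection on @connector and update its status. Returns true if the
 * status changed. The caller must hold dev->mode_config.mutex (the function
 * warns if it doesn't).
 */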
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/*
	 * Polling may have been disabled because there were no outputs to
	 * poll. Now that we have switched some connectors from HPD to
	 * polling, make sure polling is enabled.
	 */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

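/*
 * Handle an ILK RPS change request from the hardware: compare the busyness
 * averages against the max/min thresholds and step the ips frequency delay
 * one unit up or down via ironlake_set_drps(). Runs under mchdev_lock.
 */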
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

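/*
 * Bottom half for GEN6+ PM (RPS) interrupts: latch and clear rps.pm_iir
 * under irq_lock, re-enable the RPS interrupts in PMIMR, then adjust the
 * GPU frequency one step up or down under rps.hw_lock.
 */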
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

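/*
 * Top half: mask off further L3 parity interrupts and queue the work item
 * above, which does the actual error readout and userspace notification.
 */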
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

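/*
 * Process the hotplug pins that fired in @hotplug_trigger, using @hpd to map
 * pins to trigger bits. Each pin keeps a counter that is reset whenever more
 * than HPD_STORM_DETECT_PERIOD ms have passed since its last interrupt; if
 * the counter exceeds HPD_STORM_THRESHOLD within the period, the pin is
 * marked disabled (an "interrupt storm") and the hotplug interrupt setup is
 * re-run to mask it. The actual processing happens in i915_hotplug_work_func.
 */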
Daniel Vetter | 10a504d | 2013-06-27 17:52:12 +0200 | [diff] [blame] | 985 | static inline void intel_hpd_irq_handler(struct drm_device *dev, |
Daniel Vetter | 22062db | 2013-06-27 17:52:11 +0200 | [diff] [blame] | 986 | u32 hotplug_trigger, |
| 987 | const u32 *hpd) |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 988 | { |
| 989 | drm_i915_private_t *dev_priv = dev->dev_private; |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 990 | int i; |
Daniel Vetter | 10a504d | 2013-06-27 17:52:12 +0200 | [diff] [blame] | 991 | bool storm_detected = false; |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 992 | |
Daniel Vetter | 91d131d | 2013-06-27 17:52:14 +0200 | [diff] [blame] | 993 | if (!hotplug_trigger) |
| 994 | return; |
| 995 | |
Daniel Vetter | b5ea2d5 | 2013-06-27 17:52:15 +0200 | [diff] [blame] | 996 | spin_lock(&dev_priv->irq_lock); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 997 | for (i = 1; i < HPD_NUM_PINS; i++) { |
Egbert Eich | 821450c | 2013-04-16 13:36:55 +0200 | [diff] [blame] | 998 | |
Egbert Eich | b8f102e | 2013-07-26 14:14:24 +0200 | [diff] [blame] | 999 | WARN(((hpd[i] & hotplug_trigger) && |
| 1000 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), |
| 1001 | "Received HPD interrupt although disabled\n"); |
| 1002 | |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1003 | if (!(hpd[i] & hotplug_trigger) || |
| 1004 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) |
| 1005 | continue; |
| 1006 | |
Jani Nikula | bc5ead8c | 2013-05-07 15:10:29 +0300 | [diff] [blame] | 1007 | dev_priv->hpd_event_bits |= (1 << i); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1008 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, |
| 1009 | dev_priv->hpd_stats[i].hpd_last_jiffies |
| 1010 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { |
| 1011 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; |
| 1012 | dev_priv->hpd_stats[i].hpd_cnt = 0; |
Egbert Eich | b8f102e | 2013-07-26 14:14:24 +0200 | [diff] [blame] | 1013 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1014 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { |
| 1015 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; |
Egbert Eich | 142e239 | 2013-04-11 15:57:57 +0200 | [diff] [blame] | 1016 | dev_priv->hpd_event_bits &= ~(1 << i); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1017 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); |
Daniel Vetter | 10a504d | 2013-06-27 17:52:12 +0200 | [diff] [blame] | 1018 | storm_detected = true; |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1019 | } else { |
| 1020 | dev_priv->hpd_stats[i].hpd_cnt++; |
Egbert Eich | b8f102e | 2013-07-26 14:14:24 +0200 | [diff] [blame] | 1021 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, |
| 1022 | dev_priv->hpd_stats[i].hpd_cnt); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1023 | } |
| 1024 | } |
| 1025 | |
Daniel Vetter | 10a504d | 2013-06-27 17:52:12 +0200 | [diff] [blame] | 1026 | if (storm_detected) |
| 1027 | dev_priv->display.hpd_irq_setup(dev); |
Daniel Vetter | b5ea2d5 | 2013-06-27 17:52:15 +0200 | [diff] [blame] | 1028 | spin_unlock(&dev_priv->irq_lock); |
Daniel Vetter | 5876fa0 | 2013-06-27 17:52:13 +0200 | [diff] [blame] | 1029 | |
Daniel Vetter | 645416f | 2013-09-02 16:22:25 +0200 | [diff] [blame] | 1030 | /* |
| 1031 | * Our hotplug handler can grab modeset locks (by calling down into the |
 | 1032 | * fb helpers). Hence it must not be run on our own dev_priv->wq work
 | 1033 | * queue because otherwise the flush_work in the pageflip code will
| 1034 | * deadlock. |
| 1035 | */ |
| 1036 | schedule_work(&dev_priv->hotplug_work); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1037 | } |
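/*
 * A minimal userspace model of the storm detection above, assuming plain
 * millisecond timestamps instead of jiffies; every name here is
 * illustrative, not a driver symbol.  More than MODEL_STORM_THRESHOLD
 * interrupts inside one MODEL_STORM_PERIOD_MS window marks the pin as
 * storming, after which the pin is no longer serviced per-interrupt.
 */
#include <stdbool.h>

#define MODEL_STORM_PERIOD_MS   1000
#define MODEL_STORM_THRESHOLD   5

struct model_hpd_pin {
        unsigned long last_ms;  /* start of the current detect window */
        int cnt;                /* interrupts counted in that window */
        bool enabled;           /* cleared once a storm is detected */
};

/* Account one interrupt at @now_ms; returns true if a storm just tripped. */
static bool model_hpd_irq(struct model_hpd_pin *pin, unsigned long now_ms)
{
        if (now_ms - pin->last_ms > MODEL_STORM_PERIOD_MS) {
                pin->last_ms = now_ms;  /* window expired: start over */
                pin->cnt = 0;
        } else if (pin->cnt > MODEL_STORM_THRESHOLD) {
                pin->enabled = false;   /* storm: stop servicing this pin */
                return true;
        } else {
                pin->cnt++;
        }
        return false;
}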
| 1038 | |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 1039 | static void gmbus_irq_handler(struct drm_device *dev) |
| 1040 | { |
Daniel Vetter | 28c70f1 | 2012-12-01 13:53:45 +0100 | [diff] [blame] | 1041 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1042 | |
Daniel Vetter | 28c70f1 | 2012-12-01 13:53:45 +0100 | [diff] [blame] | 1043 | wake_up_all(&dev_priv->gmbus_wait_queue); |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 1044 | } |
| 1045 | |
Daniel Vetter | ce99c25 | 2012-12-01 13:53:47 +0100 | [diff] [blame] | 1046 | static void dp_aux_irq_handler(struct drm_device *dev) |
| 1047 | { |
Daniel Vetter | 9ee32fea | 2012-12-01 13:53:48 +0100 | [diff] [blame] | 1048 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1049 | |
Daniel Vetter | 9ee32fea | 2012-12-01 13:53:48 +0100 | [diff] [blame] | 1050 | wake_up_all(&dev_priv->gmbus_wait_queue); |
Daniel Vetter | ce99c25 | 2012-12-01 13:53:47 +0100 | [diff] [blame] | 1051 | } |
| 1052 | |
Paulo Zanoni | 1403c0d | 2013-08-15 11:51:32 -0300 | [diff] [blame] | 1053 | /* The RPS events need forcewake, so we add them to a work queue and mask their |
| 1054 | * IMR bits until the work is done. Other interrupts can be processed without |
| 1055 | * the work queue. */ |
| 1056 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) |
Ben Widawsky | baf02a1 | 2013-05-28 19:22:24 -0700 | [diff] [blame] | 1057 | { |
Daniel Vetter | 41a05a3 | 2013-07-04 23:35:26 +0200 | [diff] [blame] | 1058 | if (pm_iir & GEN6_PM_RPS_EVENTS) { |
Daniel Vetter | 59cdb63 | 2013-07-04 23:35:28 +0200 | [diff] [blame] | 1059 | spin_lock(&dev_priv->irq_lock); |
Daniel Vetter | 41a05a3 | 2013-07-04 23:35:26 +0200 | [diff] [blame] | 1060 | dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; |
Paulo Zanoni | 4d3b3d5 | 2013-08-09 17:04:36 -0300 | [diff] [blame] | 1061 | snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); |
Daniel Vetter | 59cdb63 | 2013-07-04 23:35:28 +0200 | [diff] [blame] | 1062 | spin_unlock(&dev_priv->irq_lock); |
Daniel Vetter | 2adbee6 | 2013-07-04 23:35:27 +0200 | [diff] [blame] | 1063 | |
| 1064 | queue_work(dev_priv->wq, &dev_priv->rps.work); |
Ben Widawsky | baf02a1 | 2013-05-28 19:22:24 -0700 | [diff] [blame] | 1065 | } |
Ben Widawsky | baf02a1 | 2013-05-28 19:22:24 -0700 | [diff] [blame] | 1066 | |
Paulo Zanoni | 1403c0d | 2013-08-15 11:51:32 -0300 | [diff] [blame] | 1067 | if (HAS_VEBOX(dev_priv->dev)) { |
| 1068 | if (pm_iir & PM_VEBOX_USER_INTERRUPT) |
| 1069 | notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); |
Ben Widawsky | 12638c5 | 2013-05-28 19:22:31 -0700 | [diff] [blame] | 1070 | |
Paulo Zanoni | 1403c0d | 2013-08-15 11:51:32 -0300 | [diff] [blame] | 1071 | if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { |
| 1072 | DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); |
| 1073 | i915_handle_error(dev_priv->dev, false); |
| 1074 | } |
Ben Widawsky | 12638c5 | 2013-05-28 19:22:31 -0700 | [diff] [blame] | 1075 | } |
Ben Widawsky | baf02a1 | 2013-05-28 19:22:24 -0700 | [diff] [blame] | 1076 | } |
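/*
 * The RPS path above is an instance of a common deferral pattern: the hard
 * irq handler masks the event and records it under a lock, and a work item
 * later processes it in sleepable context (here: under forcewake) and
 * unmasks.  A sketch with pthreads standing in for the kernel primitives;
 * every name is made up for illustration.
 */
#include <pthread.h>
#include <stdint.h>

struct model_deferred {
        pthread_mutex_t lock;
        uint32_t pending;       /* event bits waiting for process context */
        uint32_t imr;           /* 1 = masked, mirrors the hardware IMR */
};

/* Hard-irq half: cheap, never sleeps. */
static void model_irq(struct model_deferred *d, uint32_t iir, uint32_t events)
{
        pthread_mutex_lock(&d->lock);
        d->pending |= iir & events;     /* remember what fired */
        d->imr |= iir & events;         /* mask it until the work runs */
        pthread_mutex_unlock(&d->lock);
        /* ...the queue_work() equivalent would go here... */
}

/* Work half: may sleep, then re-enables the events it claimed. */
static uint32_t model_work(struct model_deferred *d)
{
        uint32_t claimed;

        pthread_mutex_lock(&d->lock);
        claimed = d->pending;
        d->pending = 0;
        d->imr &= ~claimed;             /* unmask the handled events */
        pthread_mutex_unlock(&d->lock);
        return claimed;                 /* caller processes these bits */
}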
| 1077 | |
Daniel Vetter | ff1f525 | 2012-10-02 15:10:55 +0200 | [diff] [blame] | 1078 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1079 | { |
| 1080 | struct drm_device *dev = (struct drm_device *) arg; |
| 1081 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1082 | u32 iir, gt_iir, pm_iir; |
| 1083 | irqreturn_t ret = IRQ_NONE; |
| 1084 | unsigned long irqflags; |
| 1085 | int pipe; |
| 1086 | u32 pipe_stats[I915_MAX_PIPES]; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1087 | |
| 1088 | atomic_inc(&dev_priv->irq_received); |
| 1089 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1090 | while (true) { |
| 1091 | iir = I915_READ(VLV_IIR); |
| 1092 | gt_iir = I915_READ(GTIIR); |
| 1093 | pm_iir = I915_READ(GEN6_PMIIR); |
| 1094 | |
| 1095 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) |
| 1096 | goto out; |
| 1097 | |
| 1098 | ret = IRQ_HANDLED; |
| 1099 | |
Daniel Vetter | e7b4c6b | 2012-03-30 20:24:35 +0200 | [diff] [blame] | 1100 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1101 | |
| 1102 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1103 | for_each_pipe(pipe) { |
| 1104 | int reg = PIPESTAT(pipe); |
| 1105 | pipe_stats[pipe] = I915_READ(reg); |
| 1106 | |
| 1107 | /* |
| 1108 | * Clear the PIPE*STAT regs before the IIR |
| 1109 | */ |
| 1110 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 1111 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 1112 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 1113 | pipe_name(pipe)); |
| 1114 | I915_WRITE(reg, pipe_stats[pipe]); |
| 1115 | } |
| 1116 | } |
| 1117 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1118 | |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1119 | for_each_pipe(pipe) { |
| 1120 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) |
| 1121 | drm_handle_vblank(dev, pipe); |
| 1122 | |
| 1123 | if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { |
| 1124 | intel_prepare_page_flip(dev, pipe); |
| 1125 | intel_finish_page_flip(dev, pipe); |
| 1126 | } |
| 1127 | } |
| 1128 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1129 | /* Consume port. Then clear IIR or we'll miss events */ |
| 1130 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
| 1131 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1132 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1133 | |
| 1134 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
| 1135 | hotplug_status); |
Daniel Vetter | 91d131d | 2013-06-27 17:52:14 +0200 | [diff] [blame] | 1136 | |
| 1137 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); |
| 1138 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1139 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 1140 | I915_READ(PORT_HOTPLUG_STAT); |
| 1141 | } |
| 1142 | |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 1143 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
| 1144 | gmbus_irq_handler(dev); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1145 | |
Paulo Zanoni | 60611c1 | 2013-08-15 11:50:01 -0300 | [diff] [blame] | 1146 | if (pm_iir) |
Daniel Vetter | d0ecd7e | 2013-07-04 23:35:25 +0200 | [diff] [blame] | 1147 | gen6_rps_irq_handler(dev_priv, pm_iir); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1148 | |
| 1149 | I915_WRITE(GTIIR, gt_iir); |
| 1150 | I915_WRITE(GEN6_PMIIR, pm_iir); |
| 1151 | I915_WRITE(VLV_IIR, iir); |
| 1152 | } |
| 1153 | |
| 1154 | out: |
| 1155 | return ret; |
| 1156 | } |
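/*
 * Why the handler above acks PIPESTAT and PORT_HOTPLUG_STAT before writing
 * the summary IIR: a toy model of one plausible latching scheme (an
 * assumption for illustration, not a register-accurate description), where
 * the summary bit latches only on a rising edge of a leaf status bit.  Ack
 * the leaf first and the next event is an edge again; ack the summary
 * first while the leaf is still set and the next identical event is lost.
 */
#include <stdbool.h>
#include <stdint.h>

struct model_irqchip {
        uint32_t leaf;          /* sticky per-source status */
        bool summary;           /* latched when a leaf bit rises */
};

static void model_leaf_event(struct model_irqchip *c, uint32_t bit)
{
        if (!(c->leaf & bit)) {         /* only a 0->1 edge latches */
                c->leaf |= bit;
                c->summary = true;
        }
}

/* Correct order: ack the leaf status first, then the summary. */
static void model_ack(struct model_irqchip *c, uint32_t bits)
{
        c->leaf &= ~bits;       /* next leaf event is an edge again */
        c->summary = false;
}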
| 1157 | |
Adam Jackson | 23e81d6 | 2012-06-06 15:45:44 -0400 | [diff] [blame] | 1158 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1159 | { |
| 1160 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Jesse Barnes | 9db4a9c | 2011-02-07 12:26:52 -0800 | [diff] [blame] | 1161 | int pipe; |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1162 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1163 | |
Daniel Vetter | 91d131d | 2013-06-27 17:52:14 +0200 | [diff] [blame] | 1164 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); |
| 1165 | |
Ville Syrjälä | cfc33bf | 2013-04-17 17:48:48 +0300 | [diff] [blame] | 1166 | if (pch_iir & SDE_AUDIO_POWER_MASK) { |
| 1167 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> |
| 1168 | SDE_AUDIO_POWER_SHIFT); |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1169 | DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
Ville Syrjälä | cfc33bf | 2013-04-17 17:48:48 +0300 | [diff] [blame] | 1170 | port_name(port)); |
| 1171 | } |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1172 | |
Daniel Vetter | ce99c25 | 2012-12-01 13:53:47 +0100 | [diff] [blame] | 1173 | if (pch_iir & SDE_AUX_MASK) |
| 1174 | dp_aux_irq_handler(dev); |
| 1175 | |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1176 | if (pch_iir & SDE_GMBUS) |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 1177 | gmbus_irq_handler(dev); |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1178 | |
| 1179 | if (pch_iir & SDE_AUDIO_HDCP_MASK) |
| 1180 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); |
| 1181 | |
| 1182 | if (pch_iir & SDE_AUDIO_TRANS_MASK) |
| 1183 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); |
| 1184 | |
| 1185 | if (pch_iir & SDE_POISON) |
| 1186 | DRM_ERROR("PCH poison interrupt\n"); |
| 1187 | |
Jesse Barnes | 9db4a9c | 2011-02-07 12:26:52 -0800 | [diff] [blame] | 1188 | if (pch_iir & SDE_FDI_MASK) |
| 1189 | for_each_pipe(pipe) |
| 1190 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
| 1191 | pipe_name(pipe), |
| 1192 | I915_READ(FDI_RX_IIR(pipe))); |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1193 | |
| 1194 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) |
| 1195 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); |
| 1196 | |
| 1197 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) |
| 1198 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); |
| 1199 | |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1200 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1201 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, |
| 1202 | false)) |
| 1203 | DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); |
| 1204 | |
| 1205 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) |
| 1206 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, |
| 1207 | false)) |
| 1208 | DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); |
| 1209 | } |
| 1210 | |
| 1211 | static void ivb_err_int_handler(struct drm_device *dev) |
| 1212 | { |
| 1213 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1214 | u32 err_int = I915_READ(GEN7_ERR_INT); |
| 1215 | |
Paulo Zanoni | de032bf | 2013-04-12 17:57:58 -0300 | [diff] [blame] | 1216 | if (err_int & ERR_INT_POISON) |
| 1217 | DRM_ERROR("Poison interrupt\n"); |
| 1218 | |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1219 | if (err_int & ERR_INT_FIFO_UNDERRUN_A) |
| 1220 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) |
| 1221 | DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); |
| 1222 | |
| 1223 | if (err_int & ERR_INT_FIFO_UNDERRUN_B) |
| 1224 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) |
| 1225 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); |
| 1226 | |
| 1227 | if (err_int & ERR_INT_FIFO_UNDERRUN_C) |
| 1228 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) |
| 1229 | DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); |
| 1230 | |
| 1231 | I915_WRITE(GEN7_ERR_INT, err_int); |
| 1232 | } |
| 1233 | |
| 1234 | static void cpt_serr_int_handler(struct drm_device *dev) |
| 1235 | { |
| 1236 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1237 | u32 serr_int = I915_READ(SERR_INT); |
| 1238 | |
Paulo Zanoni | de032bf | 2013-04-12 17:57:58 -0300 | [diff] [blame] | 1239 | if (serr_int & SERR_INT_POISON) |
| 1240 | DRM_ERROR("PCH poison interrupt\n"); |
| 1241 | |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1242 | if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) |
| 1243 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, |
| 1244 | false)) |
| 1245 | DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); |
| 1246 | |
| 1247 | if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) |
| 1248 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, |
| 1249 | false)) |
| 1250 | DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); |
| 1251 | |
| 1252 | if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) |
| 1253 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, |
| 1254 | false)) |
| 1255 | DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); |
| 1256 | |
| 1257 | I915_WRITE(SERR_INT, serr_int); |
Jesse Barnes | 776ad80 | 2011-01-04 15:09:39 -0800 | [diff] [blame] | 1258 | } |
| 1259 | |
Adam Jackson | 23e81d6 | 2012-06-06 15:45:44 -0400 | [diff] [blame] | 1260 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) |
| 1261 | { |
| 1262 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1263 | int pipe; |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 1264 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
Adam Jackson | 23e81d6 | 2012-06-06 15:45:44 -0400 | [diff] [blame] | 1265 | |
Daniel Vetter | 91d131d | 2013-06-27 17:52:14 +0200 | [diff] [blame] | 1266 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); |
| 1267 | |
Ville Syrjälä | cfc33bf | 2013-04-17 17:48:48 +0300 | [diff] [blame] | 1268 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { |
| 1269 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
| 1270 | SDE_AUDIO_POWER_SHIFT_CPT); |
| 1271 | DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", |
| 1272 | port_name(port)); |
| 1273 | } |
Adam Jackson | 23e81d6 | 2012-06-06 15:45:44 -0400 | [diff] [blame] | 1274 | |
| 1275 | if (pch_iir & SDE_AUX_MASK_CPT) |
Daniel Vetter | ce99c25 | 2012-12-01 13:53:47 +0100 | [diff] [blame] | 1276 | dp_aux_irq_handler(dev); |
Adam Jackson | 23e81d6 | 2012-06-06 15:45:44 -0400 | [diff] [blame] | 1277 | |
| 1278 | if (pch_iir & SDE_GMBUS_CPT) |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 1279 | gmbus_irq_handler(dev); |
Adam Jackson | 23e81d6 | 2012-06-06 15:45:44 -0400 | [diff] [blame] | 1280 | |
| 1281 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) |
| 1282 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); |
| 1283 | |
| 1284 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) |
| 1285 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); |
| 1286 | |
| 1287 | if (pch_iir & SDE_FDI_MASK_CPT) |
| 1288 | for_each_pipe(pipe) |
| 1289 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
| 1290 | pipe_name(pipe), |
| 1291 | I915_READ(FDI_RX_IIR(pipe))); |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1292 | |
| 1293 | if (pch_iir & SDE_ERROR_CPT) |
| 1294 | cpt_serr_int_handler(dev); |
Adam Jackson | 23e81d6 | 2012-06-06 15:45:44 -0400 | [diff] [blame] | 1295 | } |
| 1296 | |
Paulo Zanoni | c008bc6 | 2013-07-12 16:35:10 -0300 | [diff] [blame] | 1297 | static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) |
| 1298 | { |
| 1299 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1300 | |
| 1301 | if (de_iir & DE_AUX_CHANNEL_A) |
| 1302 | dp_aux_irq_handler(dev); |
| 1303 | |
| 1304 | if (de_iir & DE_GSE) |
| 1305 | intel_opregion_asle_intr(dev); |
| 1306 | |
| 1307 | if (de_iir & DE_PIPEA_VBLANK) |
| 1308 | drm_handle_vblank(dev, 0); |
| 1309 | |
| 1310 | if (de_iir & DE_PIPEB_VBLANK) |
| 1311 | drm_handle_vblank(dev, 1); |
| 1312 | |
| 1313 | if (de_iir & DE_POISON) |
| 1314 | DRM_ERROR("Poison interrupt\n"); |
| 1315 | |
| 1316 | if (de_iir & DE_PIPEA_FIFO_UNDERRUN) |
| 1317 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) |
| 1318 | DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); |
| 1319 | |
| 1320 | if (de_iir & DE_PIPEB_FIFO_UNDERRUN) |
| 1321 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) |
| 1322 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); |
| 1323 | |
| 1324 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
| 1325 | intel_prepare_page_flip(dev, 0); |
| 1326 | intel_finish_page_flip_plane(dev, 0); |
| 1327 | } |
| 1328 | |
| 1329 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
| 1330 | intel_prepare_page_flip(dev, 1); |
| 1331 | intel_finish_page_flip_plane(dev, 1); |
| 1332 | } |
| 1333 | |
| 1334 | /* check event from PCH */ |
| 1335 | if (de_iir & DE_PCH_EVENT) { |
| 1336 | u32 pch_iir = I915_READ(SDEIIR); |
| 1337 | |
| 1338 | if (HAS_PCH_CPT(dev)) |
| 1339 | cpt_irq_handler(dev, pch_iir); |
| 1340 | else |
| 1341 | ibx_irq_handler(dev, pch_iir); |
| 1342 | |
 | 1343 | /* should clear PCH hotplug event before clearing CPU irq */
| 1344 | I915_WRITE(SDEIIR, pch_iir); |
| 1345 | } |
| 1346 | |
| 1347 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) |
| 1348 | ironlake_rps_change_irq_handler(dev); |
| 1349 | } |
| 1350 | |
Paulo Zanoni | 9719fb9 | 2013-07-12 16:35:11 -0300 | [diff] [blame] | 1351 | static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) |
| 1352 | { |
| 1353 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1354 | int i; |
| 1355 | |
| 1356 | if (de_iir & DE_ERR_INT_IVB) |
| 1357 | ivb_err_int_handler(dev); |
| 1358 | |
| 1359 | if (de_iir & DE_AUX_CHANNEL_A_IVB) |
| 1360 | dp_aux_irq_handler(dev); |
| 1361 | |
| 1362 | if (de_iir & DE_GSE_IVB) |
| 1363 | intel_opregion_asle_intr(dev); |
| 1364 | |
| 1365 | for (i = 0; i < 3; i++) { |
| 1366 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) |
| 1367 | drm_handle_vblank(dev, i); |
| 1368 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { |
| 1369 | intel_prepare_page_flip(dev, i); |
| 1370 | intel_finish_page_flip_plane(dev, i); |
| 1371 | } |
| 1372 | } |
| 1373 | |
| 1374 | /* check event from PCH */ |
| 1375 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { |
| 1376 | u32 pch_iir = I915_READ(SDEIIR); |
| 1377 | |
| 1378 | cpt_irq_handler(dev, pch_iir); |
| 1379 | |
 | 1380 | /* clear PCH hotplug event before clearing CPU irq */
| 1381 | I915_WRITE(SDEIIR, pch_iir); |
| 1382 | } |
| 1383 | } |
| 1384 | |
Paulo Zanoni | f1af8fc | 2013-07-12 19:56:30 -0300 | [diff] [blame] | 1385 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1386 | { |
| 1387 | struct drm_device *dev = (struct drm_device *) arg; |
| 1388 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Paulo Zanoni | f1af8fc | 2013-07-12 19:56:30 -0300 | [diff] [blame] | 1389 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; |
Chris Wilson | 0e43406 | 2012-05-09 21:45:44 +0100 | [diff] [blame] | 1390 | irqreturn_t ret = IRQ_NONE; |
Paulo Zanoni | 333a820 | 2013-08-06 18:57:16 -0300 | [diff] [blame] | 1391 | bool err_int_reenable = false; |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1392 | |
| 1393 | atomic_inc(&dev_priv->irq_received); |
| 1394 | |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1395 | /* We get interrupts on unclaimed registers, so check for this before we |
| 1396 | * do any I915_{READ,WRITE}. */ |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1397 | intel_uncore_check_errors(dev); |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1398 | |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1399 | /* disable master interrupt before clearing iir */ |
| 1400 | de_ier = I915_READ(DEIER); |
| 1401 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
Paulo Zanoni | 23a7851 | 2013-07-12 16:35:14 -0300 | [diff] [blame] | 1402 | POSTING_READ(DEIER); |
Chris Wilson | 0e43406 | 2012-05-09 21:45:44 +0100 | [diff] [blame] | 1403 | |
Paulo Zanoni | 44498ae | 2013-02-22 17:05:28 -0300 | [diff] [blame] | 1404 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
 | 1405 | * interrupts will be stored on its back queue, and then we'll be
| 1406 | * able to process them after we restore SDEIER (as soon as we restore |
| 1407 | * it, we'll get an interrupt if SDEIIR still has something to process |
| 1408 | * due to its back queue). */ |
Ben Widawsky | ab5c608 | 2013-04-05 13:12:41 -0700 | [diff] [blame] | 1409 | if (!HAS_PCH_NOP(dev)) { |
| 1410 | sde_ier = I915_READ(SDEIER); |
| 1411 | I915_WRITE(SDEIER, 0); |
| 1412 | POSTING_READ(SDEIER); |
| 1413 | } |
Paulo Zanoni | 44498ae | 2013-02-22 17:05:28 -0300 | [diff] [blame] | 1414 | |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1415 | /* On Haswell, also mask ERR_INT because we don't want to risk |
| 1416 | * generating "unclaimed register" interrupts from inside the interrupt |
| 1417 | * handler. */ |
Daniel Vetter | 4bc9d43 | 2013-06-27 13:44:58 +0200 | [diff] [blame] | 1418 | if (IS_HASWELL(dev)) { |
| 1419 | spin_lock(&dev_priv->irq_lock); |
Paulo Zanoni | 333a820 | 2013-08-06 18:57:16 -0300 | [diff] [blame] | 1420 | err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; |
| 1421 | if (err_int_reenable) |
| 1422 | ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); |
Daniel Vetter | 4bc9d43 | 2013-06-27 13:44:58 +0200 | [diff] [blame] | 1423 | spin_unlock(&dev_priv->irq_lock); |
| 1424 | } |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1425 | |
Chris Wilson | 0e43406 | 2012-05-09 21:45:44 +0100 | [diff] [blame] | 1426 | gt_iir = I915_READ(GTIIR); |
| 1427 | if (gt_iir) { |
Paulo Zanoni | d8fc8a4 | 2013-07-19 18:57:55 -0300 | [diff] [blame] | 1428 | if (INTEL_INFO(dev)->gen >= 6) |
Paulo Zanoni | f1af8fc | 2013-07-12 19:56:30 -0300 | [diff] [blame] | 1429 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
Paulo Zanoni | d8fc8a4 | 2013-07-19 18:57:55 -0300 | [diff] [blame] | 1430 | else |
| 1431 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); |
Chris Wilson | 0e43406 | 2012-05-09 21:45:44 +0100 | [diff] [blame] | 1432 | I915_WRITE(GTIIR, gt_iir); |
| 1433 | ret = IRQ_HANDLED; |
| 1434 | } |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1435 | |
| 1436 | de_iir = I915_READ(DEIIR); |
Chris Wilson | 0e43406 | 2012-05-09 21:45:44 +0100 | [diff] [blame] | 1437 | if (de_iir) { |
Paulo Zanoni | f1af8fc | 2013-07-12 19:56:30 -0300 | [diff] [blame] | 1438 | if (INTEL_INFO(dev)->gen >= 7) |
| 1439 | ivb_display_irq_handler(dev, de_iir); |
| 1440 | else |
| 1441 | ilk_display_irq_handler(dev, de_iir); |
Chris Wilson | 0e43406 | 2012-05-09 21:45:44 +0100 | [diff] [blame] | 1442 | I915_WRITE(DEIIR, de_iir); |
| 1443 | ret = IRQ_HANDLED; |
| 1444 | } |
| 1445 | |
Paulo Zanoni | f1af8fc | 2013-07-12 19:56:30 -0300 | [diff] [blame] | 1446 | if (INTEL_INFO(dev)->gen >= 6) { |
| 1447 | u32 pm_iir = I915_READ(GEN6_PMIIR); |
| 1448 | if (pm_iir) { |
Paulo Zanoni | 1403c0d | 2013-08-15 11:51:32 -0300 | [diff] [blame] | 1449 | gen6_rps_irq_handler(dev_priv, pm_iir); |
Paulo Zanoni | f1af8fc | 2013-07-12 19:56:30 -0300 | [diff] [blame] | 1450 | I915_WRITE(GEN6_PMIIR, pm_iir); |
| 1451 | ret = IRQ_HANDLED; |
| 1452 | } |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1453 | } |
| 1454 | |
Paulo Zanoni | 333a820 | 2013-08-06 18:57:16 -0300 | [diff] [blame] | 1455 | if (err_int_reenable) { |
Daniel Vetter | 4bc9d43 | 2013-06-27 13:44:58 +0200 | [diff] [blame] | 1456 | spin_lock(&dev_priv->irq_lock); |
| 1457 | if (ivb_can_enable_err_int(dev)) |
| 1458 | ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); |
| 1459 | spin_unlock(&dev_priv->irq_lock); |
| 1460 | } |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 1461 | |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1462 | I915_WRITE(DEIER, de_ier); |
| 1463 | POSTING_READ(DEIER); |
Ben Widawsky | ab5c608 | 2013-04-05 13:12:41 -0700 | [diff] [blame] | 1464 | if (!HAS_PCH_NOP(dev)) { |
| 1465 | I915_WRITE(SDEIER, sde_ier); |
| 1466 | POSTING_READ(SDEIER); |
| 1467 | } |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1468 | |
| 1469 | return ret; |
| 1470 | } |
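/*
 * The SDEIER dance above in miniature: gate the south chip's output so
 * SDEIIR is read and acked exactly once per top-level interrupt, then
 * restore the enable; anything that arrived meanwhile re-asserts once the
 * gate opens.  Userspace model with plain variables for the registers
 * (illustrative only).
 */
#include <stdint.h>

struct model_pch {
        uint32_t sdeiir;        /* pending south events */
        uint32_t sdeier;        /* enable: 0 gates the output entirely */
};

static uint32_t model_service_south(struct model_pch *pch)
{
        uint32_t saved = pch->sdeier;
        uint32_t iir;

        pch->sdeier = 0;                /* gate: no new output while we work */
        iir = pch->sdeiir;              /* single read ... */
        pch->sdeiir &= ~iir;            /* ... single ack */
        pch->sdeier = saved;            /* restore: queued events re-assert */
        return iir;                     /* bits for the caller to dispatch */
}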
| 1471 | |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1472 | /** |
| 1473 | * i915_error_work_func - do process context error handling work |
| 1474 | * @work: work struct |
| 1475 | * |
| 1476 | * Fire an error uevent so userspace can see that a hang or error |
| 1477 | * was detected. |
| 1478 | */ |
| 1479 | static void i915_error_work_func(struct work_struct *work) |
| 1480 | { |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1481 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, |
| 1482 | work); |
| 1483 | drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, |
| 1484 | gpu_error); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1485 | struct drm_device *dev = dev_priv->dev; |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 1486 | struct intel_ring_buffer *ring; |
Ben Widawsky | cce723e | 2013-07-19 09:16:42 -0700 | [diff] [blame] | 1487 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; |
| 1488 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; |
| 1489 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 1490 | int i, ret; |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1491 | |
Ben Gamari | f316a42 | 2009-09-14 17:48:46 -0400 | [diff] [blame] | 1492 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1493 | |
Daniel Vetter | 7db0ba2 | 2012-12-06 16:23:37 +0100 | [diff] [blame] | 1494 | /* |
| 1495 | * Note that there's only one work item which does gpu resets, so we |
| 1496 | * need not worry about concurrent gpu resets potentially incrementing |
| 1497 | * error->reset_counter twice. We only need to take care of another |
| 1498 | * racing irq/hangcheck declaring the gpu dead for a second time. A |
| 1499 | * quick check for that is good enough: schedule_work ensures the |
| 1500 | * correct ordering between hang detection and this work item, and since |
| 1501 | * the reset in-progress bit is only ever set by code outside of this |
| 1502 | * work we don't need to worry about any other races. |
| 1503 | */ |
| 1504 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { |
Chris Wilson | f803aa5 | 2010-09-19 12:38:26 +0100 | [diff] [blame] | 1505 | DRM_DEBUG_DRIVER("resetting chip\n"); |
Daniel Vetter | 7db0ba2 | 2012-12-06 16:23:37 +0100 | [diff] [blame] | 1506 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, |
| 1507 | reset_event); |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1508 | |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 1509 | ret = i915_reset(dev); |
| 1510 | |
| 1511 | if (ret == 0) { |
| 1512 | /* |
| 1513 | * After all the gem state is reset, increment the reset |
| 1514 | * counter and wake up everyone waiting for the reset to |
| 1515 | * complete. |
| 1516 | * |
| 1517 | * Since unlock operations are a one-sided barrier only, |
| 1518 | * we need to insert a barrier here to order any seqno |
 | 1519 | * updates before the counter
 | 1520 | * increment.
| 1521 | */ |
| 1522 | smp_mb__before_atomic_inc(); |
| 1523 | atomic_inc(&dev_priv->gpu_error.reset_counter); |
| 1524 | |
| 1525 | kobject_uevent_env(&dev->primary->kdev.kobj, |
| 1526 | KOBJ_CHANGE, reset_done_event); |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1527 | } else { |
| 1528 | atomic_set(&error->reset_counter, I915_WEDGED); |
Ben Gamari | f316a42 | 2009-09-14 17:48:46 -0400 | [diff] [blame] | 1529 | } |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1530 | |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 1531 | for_each_ring(ring, dev_priv, i) |
| 1532 | wake_up_all(&ring->irq_queue); |
| 1533 | |
Ville Syrjälä | 96a0291 | 2013-02-18 19:08:49 +0200 | [diff] [blame] | 1534 | intel_display_handle_reset(dev); |
| 1535 | |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1536 | wake_up_all(&dev_priv->gpu_error.reset_queue); |
Ben Gamari | f316a42 | 2009-09-14 17:48:46 -0400 | [diff] [blame] | 1537 | } |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1538 | } |
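/*
 * A compact model of the reset_counter protocol used above, assuming the
 * low bit means "reset in progress" and an all-ones value means wedged
 * (the names are illustrative).  Waiters sample the counter before
 * sleeping and treat any change as "a reset happened while I waited".
 */
#include <stdbool.h>
#include <stdint.h>

#define MODEL_RESET_IN_PROGRESS 0x1u
#define MODEL_WEDGED            0xffffffffu

static bool model_reset_in_progress(uint32_t counter)
{
        return counter & MODEL_RESET_IN_PROGRESS;
}

static bool model_terminally_wedged(uint32_t counter)
{
        return counter == MODEL_WEDGED;
}

/* Hang declared: set the flag, making the counter odd. */
static uint32_t model_declare_hang(uint32_t counter)
{
        return counter | MODEL_RESET_IN_PROGRESS;
}

/*
 * Reset finished: a single increment clears the flag and advances the
 * epoch at once, so a waiter comparing its sampled value observes both
 * together.  In the driver this is where smp_mb__before_atomic_inc()
 * sits, ordering seqno updates before the new epoch becomes visible.
 */
static uint32_t model_reset_done(uint32_t counter)
{
        return counter + 1;
}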
| 1539 | |
Chris Wilson | 35aed2e | 2010-05-27 13:18:12 +0100 | [diff] [blame] | 1540 | static void i915_report_and_clear_eir(struct drm_device *dev) |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1541 | { |
| 1542 | struct drm_i915_private *dev_priv = dev->dev_private; |
Ben Widawsky | bd9854f | 2012-08-23 15:18:09 -0700 | [diff] [blame] | 1543 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1544 | u32 eir = I915_READ(EIR); |
Ben Widawsky | 050ee91 | 2012-08-22 11:32:15 -0700 | [diff] [blame] | 1545 | int pipe, i; |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1546 | |
Chris Wilson | 35aed2e | 2010-05-27 13:18:12 +0100 | [diff] [blame] | 1547 | if (!eir) |
| 1548 | return; |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1549 | |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1550 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1551 | |
Ben Widawsky | bd9854f | 2012-08-23 15:18:09 -0700 | [diff] [blame] | 1552 | i915_get_extra_instdone(dev, instdone); |
| 1553 | |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1554 | if (IS_G4X(dev)) { |
| 1555 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { |
| 1556 | u32 ipeir = I915_READ(IPEIR_I965); |
| 1557 | |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1558 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
| 1559 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
Ben Widawsky | 050ee91 | 2012-08-22 11:32:15 -0700 | [diff] [blame] | 1560 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
| 1561 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1562 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1563 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1564 | I915_WRITE(IPEIR_I965, ipeir); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 1565 | POSTING_READ(IPEIR_I965); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1566 | } |
| 1567 | if (eir & GM45_ERROR_PAGE_TABLE) { |
| 1568 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1569 | pr_err("page table error\n"); |
| 1570 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1571 | I915_WRITE(PGTBL_ER, pgtbl_err); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 1572 | POSTING_READ(PGTBL_ER); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1573 | } |
| 1574 | } |
| 1575 | |
Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 1576 | if (!IS_GEN2(dev)) { |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1577 | if (eir & I915_ERROR_PAGE_TABLE) { |
| 1578 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1579 | pr_err("page table error\n"); |
| 1580 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1581 | I915_WRITE(PGTBL_ER, pgtbl_err); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 1582 | POSTING_READ(PGTBL_ER); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1583 | } |
| 1584 | } |
| 1585 | |
| 1586 | if (eir & I915_ERROR_MEMORY_REFRESH) { |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1587 | pr_err("memory refresh error:\n"); |
Jesse Barnes | 9db4a9c | 2011-02-07 12:26:52 -0800 | [diff] [blame] | 1588 | for_each_pipe(pipe) |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1589 | pr_err("pipe %c stat: 0x%08x\n", |
Jesse Barnes | 9db4a9c | 2011-02-07 12:26:52 -0800 | [diff] [blame] | 1590 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1591 | /* pipestat has already been acked */ |
| 1592 | } |
| 1593 | if (eir & I915_ERROR_INSTRUCTION) { |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1594 | pr_err("instruction error\n"); |
| 1595 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); |
Ben Widawsky | 050ee91 | 2012-08-22 11:32:15 -0700 | [diff] [blame] | 1596 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
| 1597 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); |
Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 1598 | if (INTEL_INFO(dev)->gen < 4) { |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1599 | u32 ipeir = I915_READ(IPEIR); |
| 1600 | |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1601 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
| 1602 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1603 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1604 | I915_WRITE(IPEIR, ipeir); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 1605 | POSTING_READ(IPEIR); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1606 | } else { |
| 1607 | u32 ipeir = I915_READ(IPEIR_I965); |
| 1608 | |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1609 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
| 1610 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1611 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
Joe Perches | a70491c | 2012-03-18 13:00:11 -0700 | [diff] [blame] | 1612 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1613 | I915_WRITE(IPEIR_I965, ipeir); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 1614 | POSTING_READ(IPEIR_I965); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1615 | } |
| 1616 | } |
| 1617 | |
| 1618 | I915_WRITE(EIR, eir); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 1619 | POSTING_READ(EIR); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1620 | eir = I915_READ(EIR); |
| 1621 | if (eir) { |
| 1622 | /* |
| 1623 | * some errors might have become stuck, |
| 1624 | * mask them. |
| 1625 | */ |
| 1626 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); |
| 1627 | I915_WRITE(EMR, I915_READ(EMR) | eir); |
| 1628 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 1629 | } |
Chris Wilson | 35aed2e | 2010-05-27 13:18:12 +0100 | [diff] [blame] | 1630 | } |
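/*
 * The tail of the function above handles error bits that stay asserted no
 * matter how often they are acked.  Sketch of the idea with plain
 * variables for EIR/EMR and an explicit "stuck" set to emulate sources
 * that immediately re-latch (illustrative, not register-accurate):
 */
#include <stdint.h>

struct model_err_regs {
        uint32_t eir;   /* latched error status, write-1-to-clear */
        uint32_t stuck; /* fault sources that re-latch right away */
        uint32_t emr;   /* mask: set bits stop raising interrupts */
};

static void model_clear_errors(struct model_err_regs *r)
{
        uint32_t eir = r->eir;

        /* Write-1-to-clear ack; stuck sources re-latch immediately. */
        r->eir = (r->eir & ~eir) | r->stuck;
        if (r->eir)
                r->emr |= r->eir;       /* mask whatever refuses to clear */
}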
| 1631 | |
| 1632 | /** |
| 1633 | * i915_handle_error - handle an error interrupt |
| 1634 | * @dev: drm device |
| 1635 | * |
 | 1636 | * Do some basic checking of register state at error interrupt time and
| 1637 | * dump it to the syslog. Also call i915_capture_error_state() to make |
| 1638 | * sure we get a record and make it available in debugfs. Fire a uevent |
| 1639 | * so userspace knows something bad happened (should trigger collection |
| 1640 | * of a ring dump etc.). |
| 1641 | */ |
Chris Wilson | 527f9e9 | 2010-11-11 01:16:58 +0000 | [diff] [blame] | 1642 | void i915_handle_error(struct drm_device *dev, bool wedged) |
Chris Wilson | 35aed2e | 2010-05-27 13:18:12 +0100 | [diff] [blame] | 1643 | { |
| 1644 | struct drm_i915_private *dev_priv = dev->dev_private; |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 1645 | struct intel_ring_buffer *ring; |
| 1646 | int i; |
Chris Wilson | 35aed2e | 2010-05-27 13:18:12 +0100 | [diff] [blame] | 1647 | |
| 1648 | i915_capture_error_state(dev); |
| 1649 | i915_report_and_clear_eir(dev); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1650 | |
Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 1651 | if (wedged) { |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 1652 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
| 1653 | &dev_priv->gpu_error.reset_counter); |
Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 1654 | |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 1655 | /* |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1656 | * Wakeup waiting processes so that the reset work item |
| 1657 | * doesn't deadlock trying to grab various locks. |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 1658 | */ |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 1659 | for_each_ring(ring, dev_priv, i) |
| 1660 | wake_up_all(&ring->irq_queue); |
Ben Gamari | 11ed50e | 2009-09-14 17:48:45 -0400 | [diff] [blame] | 1661 | } |
| 1662 | |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 1663 | queue_work(dev_priv->wq, &dev_priv->gpu_error.work); |
Jesse Barnes | 8a90523 | 2009-07-11 16:48:03 -0400 | [diff] [blame] | 1664 | } |
| 1665 | |
Ville Syrjälä | 21ad833 | 2013-02-19 15:16:39 +0200 | [diff] [blame] | 1666 | static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) |
Simon Farnsworth | 4e5359c | 2010-09-01 17:47:52 +0100 | [diff] [blame] | 1667 | { |
| 1668 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1669 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
| 1670 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1671 | struct drm_i915_gem_object *obj; |
Simon Farnsworth | 4e5359c | 2010-09-01 17:47:52 +0100 | [diff] [blame] | 1672 | struct intel_unpin_work *work; |
| 1673 | unsigned long flags; |
| 1674 | bool stall_detected; |
| 1675 | |
| 1676 | /* Ignore early vblank irqs */ |
| 1677 | if (intel_crtc == NULL) |
| 1678 | return; |
| 1679 | |
| 1680 | spin_lock_irqsave(&dev->event_lock, flags); |
| 1681 | work = intel_crtc->unpin_work; |
| 1682 | |
Chris Wilson | e7d841c | 2012-12-03 11:36:30 +0000 | [diff] [blame] | 1683 | if (work == NULL || |
| 1684 | atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || |
| 1685 | !work->enable_stall_check) { |
Simon Farnsworth | 4e5359c | 2010-09-01 17:47:52 +0100 | [diff] [blame] | 1686 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ |
| 1687 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 1688 | return; |
| 1689 | } |
| 1690 | |
| 1691 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1692 | obj = work->pending_flip_obj; |
Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 1693 | if (INTEL_INFO(dev)->gen >= 4) { |
Jesse Barnes | 9db4a9c | 2011-02-07 12:26:52 -0800 | [diff] [blame] | 1694 | int dspsurf = DSPSURF(intel_crtc->plane); |
Armin Reese | 446f254 | 2012-03-30 16:20:16 -0700 | [diff] [blame] | 1695 | stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == |
Ben Widawsky | f343c5f | 2013-07-05 14:41:04 -0700 | [diff] [blame] | 1696 | i915_gem_obj_ggtt_offset(obj); |
Simon Farnsworth | 4e5359c | 2010-09-01 17:47:52 +0100 | [diff] [blame] | 1697 | } else { |
Jesse Barnes | 9db4a9c | 2011-02-07 12:26:52 -0800 | [diff] [blame] | 1698 | int dspaddr = DSPADDR(intel_crtc->plane); |
Ben Widawsky | f343c5f | 2013-07-05 14:41:04 -0700 | [diff] [blame] | 1699 | stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + |
Ville Syrjälä | 01f2c77 | 2011-12-20 00:06:49 +0200 | [diff] [blame] | 1700 | crtc->y * crtc->fb->pitches[0] + |
Simon Farnsworth | 4e5359c | 2010-09-01 17:47:52 +0100 | [diff] [blame] | 1701 | crtc->x * crtc->fb->bits_per_pixel/8); |
| 1702 | } |
| 1703 | |
| 1704 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 1705 | |
| 1706 | if (stall_detected) { |
| 1707 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); |
| 1708 | intel_prepare_page_flip(dev, intel_crtc->plane); |
| 1709 | } |
| 1710 | } |
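/*
 * The stall check above reduces to one question: does the scanout base
 * register already point at the buffer we flipped to, even though no
 * flip-done interrupt arrived?  If so, assume the interrupt was missed.
 * Sketch with made-up types (illustrative only):
 */
#include <stdbool.h>
#include <stdint.h>

struct model_flip {
        uint64_t new_scanout_addr;      /* where the flip should land */
        bool completed;                 /* flip-done irq seen */
};

/* @dspsurf: current scanout base as read back from the hardware. */
static bool model_flip_stalled(const struct model_flip *f, uint64_t dspsurf)
{
        /* Hardware already displays the new buffer, yet no completion:
         * the flip-done interrupt was most likely lost. */
        return !f->completed && dspsurf == f->new_scanout_addr;
}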
| 1711 | |
Keith Packard | 42f52ef | 2008-10-18 19:39:29 -0700 | [diff] [blame] | 1712 | /* Called from drm generic code, passed 'crtc' which |
| 1713 | * we use as a pipe index |
| 1714 | */ |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 1715 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
Jesse Barnes | 0a3e67a | 2008-09-30 12:14:26 -0700 | [diff] [blame] | 1716 | { |
| 1717 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Keith Packard | e9d21d7 | 2008-10-16 11:31:38 -0700 | [diff] [blame] | 1718 | unsigned long irqflags; |
Jesse Barnes | 71e0ffa | 2009-01-08 10:42:15 -0800 | [diff] [blame] | 1719 | |
Chris Wilson | 5eddb70 | 2010-09-11 13:48:45 +0100 | [diff] [blame] | 1720 | if (!i915_pipe_enabled(dev, pipe)) |
Jesse Barnes | 71e0ffa | 2009-01-08 10:42:15 -0800 | [diff] [blame] | 1721 | return -EINVAL; |
Jesse Barnes | 0a3e67a | 2008-09-30 12:14:26 -0700 | [diff] [blame] | 1722 | |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 1723 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Jesse Barnes | f796cf8 | 2011-04-07 13:58:17 -0700 | [diff] [blame] | 1724 | if (INTEL_INFO(dev)->gen >= 4) |
Keith Packard | 7c46358 | 2008-11-04 02:03:27 -0800 | [diff] [blame] | 1725 | i915_enable_pipestat(dev_priv, pipe, |
| 1726 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
Keith Packard | e9d21d7 | 2008-10-16 11:31:38 -0700 | [diff] [blame] | 1727 | else |
Keith Packard | 7c46358 | 2008-11-04 02:03:27 -0800 | [diff] [blame] | 1728 | i915_enable_pipestat(dev_priv, pipe, |
| 1729 | PIPE_VBLANK_INTERRUPT_ENABLE); |
Chris Wilson | 8692d00e | 2011-02-05 10:08:21 +0000 | [diff] [blame] | 1730 | |
| 1731 | /* maintain vblank delivery even in deep C-states */ |
| 1732 | if (dev_priv->info->gen == 3) |
Daniel Vetter | 6b26c86 | 2012-04-24 14:04:12 +0200 | [diff] [blame] | 1733 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 1734 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
Chris Wilson | 8692d00e | 2011-02-05 10:08:21 +0000 | [diff] [blame] | 1735 | |
Jesse Barnes | 0a3e67a | 2008-09-30 12:14:26 -0700 | [diff] [blame] | 1736 | return 0; |
| 1737 | } |
| 1738 | |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 1739 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
Jesse Barnes | f796cf8 | 2011-04-07 13:58:17 -0700 | [diff] [blame] | 1740 | { |
| 1741 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1742 | unsigned long irqflags; |
Paulo Zanoni | b518421 | 2013-07-12 20:00:08 -0300 | [diff] [blame] | 1743 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
| 1744 | DE_PIPE_VBLANK_ILK(pipe); |
Jesse Barnes | f796cf8 | 2011-04-07 13:58:17 -0700 | [diff] [blame] | 1745 | |
| 1746 | if (!i915_pipe_enabled(dev, pipe)) |
| 1747 | return -EINVAL; |
| 1748 | |
| 1749 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Paulo Zanoni | b518421 | 2013-07-12 20:00:08 -0300 | [diff] [blame] | 1750 | ironlake_enable_display_irq(dev_priv, bit); |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1751 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1752 | |
| 1753 | return 0; |
| 1754 | } |
| 1755 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1756 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
| 1757 | { |
| 1758 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1759 | unsigned long irqflags; |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1760 | u32 imr; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1761 | |
| 1762 | if (!i915_pipe_enabled(dev, pipe)) |
| 1763 | return -EINVAL; |
| 1764 | |
| 1765 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1766 | imr = I915_READ(VLV_IMR); |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1767 | if (pipe == 0) |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1768 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1769 | else |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1770 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1771 | I915_WRITE(VLV_IMR, imr); |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1772 | i915_enable_pipestat(dev_priv, pipe, |
| 1773 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1774 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1775 | |
| 1776 | return 0; |
| 1777 | } |
| 1778 | |
Keith Packard | 42f52ef | 2008-10-18 19:39:29 -0700 | [diff] [blame] | 1779 | /* Called from drm generic code, passed 'crtc' which |
| 1780 | * we use as a pipe index |
| 1781 | */ |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 1782 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
Jesse Barnes | 0a3e67a | 2008-09-30 12:14:26 -0700 | [diff] [blame] | 1783 | { |
| 1784 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Keith Packard | e9d21d7 | 2008-10-16 11:31:38 -0700 | [diff] [blame] | 1785 | unsigned long irqflags; |
Jesse Barnes | 0a3e67a | 2008-09-30 12:14:26 -0700 | [diff] [blame] | 1786 | |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 1787 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Chris Wilson | 8692d00e | 2011-02-05 10:08:21 +0000 | [diff] [blame] | 1788 | if (dev_priv->info->gen == 3) |
Daniel Vetter | 6b26c86 | 2012-04-24 14:04:12 +0200 | [diff] [blame] | 1789 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); |
Chris Wilson | 8692d00e | 2011-02-05 10:08:21 +0000 | [diff] [blame] | 1790 | |
Jesse Barnes | f796cf8 | 2011-04-07 13:58:17 -0700 | [diff] [blame] | 1791 | i915_disable_pipestat(dev_priv, pipe, |
| 1792 | PIPE_VBLANK_INTERRUPT_ENABLE | |
| 1793 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
| 1794 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1795 | } |
| 1796 | |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 1797 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
Jesse Barnes | f796cf8 | 2011-04-07 13:58:17 -0700 | [diff] [blame] | 1798 | { |
| 1799 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1800 | unsigned long irqflags; |
Paulo Zanoni | b518421 | 2013-07-12 20:00:08 -0300 | [diff] [blame] | 1801 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
| 1802 | DE_PIPE_VBLANK_ILK(pipe); |
Jesse Barnes | f796cf8 | 2011-04-07 13:58:17 -0700 | [diff] [blame] | 1803 | |
| 1804 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Paulo Zanoni | b518421 | 2013-07-12 20:00:08 -0300 | [diff] [blame] | 1805 | ironlake_disable_display_irq(dev_priv, bit); |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 1806 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1807 | } |
| 1808 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1809 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
| 1810 | { |
| 1811 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1812 | unsigned long irqflags; |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1813 | u32 imr; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1814 | |
| 1815 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1816 | i915_disable_pipestat(dev_priv, pipe, |
| 1817 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1818 | imr = I915_READ(VLV_IMR); |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1819 | if (pipe == 0) |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1820 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 1821 | else |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1822 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1823 | I915_WRITE(VLV_IMR, imr); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 1824 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1825 | } |
| 1826 | |
Chris Wilson | 893eead | 2010-10-27 14:44:35 +0100 | [diff] [blame] | 1827 | static u32 |
| 1828 | ring_last_seqno(struct intel_ring_buffer *ring) |
Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1829 | { |
Chris Wilson | 893eead | 2010-10-27 14:44:35 +0100 | [diff] [blame] | 1830 | return list_entry(ring->request_list.prev, |
| 1831 | struct drm_i915_gem_request, list)->seqno; |
| 1832 | } |
| 1833 | |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1834 | static bool |
| 1835 | ring_idle(struct intel_ring_buffer *ring, u32 seqno) |
Chris Wilson | 893eead | 2010-10-27 14:44:35 +0100 | [diff] [blame] | 1836 | { |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1837 | return (list_empty(&ring->request_list) || |
| 1838 | i915_seqno_passed(seqno, ring_last_seqno(ring))); |
Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1839 | } |
| 1840 | |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1841 | static struct intel_ring_buffer * |
| 1842 | semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) |
Chris Wilson | a24a11e | 2013-03-14 17:52:05 +0200 | [diff] [blame] | 1843 | { |
| 1844 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1845 | u32 cmd, ipehr, acthd, acthd_min; |
Chris Wilson | a24a11e | 2013-03-14 17:52:05 +0200 | [diff] [blame] | 1846 | |
| 1847 | ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); |
| 1848 | if ((ipehr & ~(0x3 << 16)) != |
| 1849 | (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1850 | return NULL; |
Chris Wilson | a24a11e | 2013-03-14 17:52:05 +0200 | [diff] [blame] | 1851 | |
| 1852 | /* ACTHD is likely pointing to the dword after the actual command, |
| 1853 | * so scan backwards until we find the MBOX. |
| 1854 | */ |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1855 | acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; |
Chris Wilson | a24a11e | 2013-03-14 17:52:05 +0200 | [diff] [blame] | 1856 | acthd_min = max((int)acthd - 3 * 4, 0); |
| 1857 | do { |
| 1858 | cmd = ioread32(ring->virtual_start + acthd); |
| 1859 | if (cmd == ipehr) |
| 1860 | break; |
| 1861 | |
| 1862 | acthd -= 4; |
| 1863 | if (acthd < acthd_min) |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1864 | return NULL; |
Chris Wilson | a24a11e | 2013-03-14 17:52:05 +0200 | [diff] [blame] | 1865 | } while (1); |
| 1866 | |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1867 | *seqno = ioread32(ring->virtual_start+acthd+4)+1; |
| 1868 | return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; |
Chris Wilson | a24a11e | 2013-03-14 17:52:05 +0200 | [diff] [blame] | 1869 | } |
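/*
 * The backward scan above, in isolation: starting at the head pointer,
 * walk backwards for a bounded distance looking for the opcode that
 * caused the wait.  Userspace sketch over a plain array, using dword
 * indices where the driver uses byte offsets (illustrative only).
 */
#include <stdint.h>

/*
 * Returns the index of @opcode at or before @head, scanning at most
 * @max_back dwords, or -1 if it is not found in that window.
 */
static int model_scan_back(const uint32_t *ring, int head,
                           uint32_t opcode, int max_back)
{
        int lo = head - max_back;

        if (lo < 0)
                lo = 0;
        for (; head >= lo; head--) {
                if (ring[head] == opcode)
                        return head;    /* the dword after holds the seqno */
        }
        return -1;
}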
| 1870 | |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1871 | static int semaphore_passed(struct intel_ring_buffer *ring) |
| 1872 | { |
| 1873 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
| 1874 | struct intel_ring_buffer *signaller; |
| 1875 | u32 seqno, ctl; |
| 1876 | |
| 1877 | ring->hangcheck.deadlock = true; |
| 1878 | |
| 1879 | signaller = semaphore_waits_for(ring, &seqno); |
| 1880 | if (signaller == NULL || signaller->hangcheck.deadlock) |
| 1881 | return -1; |
| 1882 | |
| 1883 | /* cursory check for an unkickable deadlock */ |
| 1884 | ctl = I915_READ_CTL(signaller); |
| 1885 | if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) |
| 1886 | return -1; |
| 1887 | |
| 1888 | return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); |
| 1889 | } |
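/*
 * semaphore_passed() recurses across rings, using the per-ring "deadlock"
 * flag as an on-path marker so a wait cycle (A waits on B waits on A) is
 * caught instead of recursing forever; the flags are wiped between passes,
 * as semaphore_clear_deadlocks() below does.  The same idea on a toy
 * waits-for graph (all names made up):
 */
#include <stdbool.h>

struct model_ring {
        int waits_for;          /* index of the signalling ring, or -1 */
        bool on_path;           /* set while this node is being visited */
};

/* Returns true if following waits-for edges from @i closes a cycle. */
static bool model_deadlocked(struct model_ring *rings, int i)
{
        if (rings[i].on_path)
                return true;            /* revisited a node on the path */
        if (rings[i].waits_for < 0)
                return false;           /* this ring can make progress */
        rings[i].on_path = true;
        return model_deadlocked(rings, rings[i].waits_for);
}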
| 1890 | |
| 1891 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) |
| 1892 | { |
| 1893 | struct intel_ring_buffer *ring; |
| 1894 | int i; |
| 1895 | |
| 1896 | for_each_ring(ring, dev_priv, i) |
| 1897 | ring->hangcheck.deadlock = false; |
| 1898 | } |
| 1899 | |
Mika Kuoppala | ad8beae | 2013-06-12 12:35:32 +0300 | [diff] [blame] | 1900 | static enum intel_ring_hangcheck_action |
| 1901 | ring_stuck(struct intel_ring_buffer *ring, u32 acthd) |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 1902 | { |
| 1903 | struct drm_device *dev = ring->dev; |
| 1904 | struct drm_i915_private *dev_priv = dev->dev_private; |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1905 | u32 tmp; |
| 1906 | |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1907 | if (ring->hangcheck.acthd != acthd) |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 1908 | return HANGCHECK_ACTIVE; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1909 | |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1910 | if (IS_GEN2(dev)) |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 1911 | return HANGCHECK_HUNG; |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1912 | |
| 1913 | /* Is the chip hanging on a WAIT_FOR_EVENT? |
| 1914 | * If so we can simply poke the RB_WAIT bit |
| 1915 | * and break the hang. This should work on |
| 1916 | * all but the second generation chipsets. |
| 1917 | */ |
| 1918 | tmp = I915_READ_CTL(ring); |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 1919 | if (tmp & RING_WAIT) { |
| 1920 | DRM_ERROR("Kicking stuck wait on %s\n", |
| 1921 | ring->name); |
| 1922 | I915_WRITE_CTL(ring, tmp); |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 1923 | return HANGCHECK_KICK; |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 1924 | } |
Chris Wilson | a24a11e | 2013-03-14 17:52:05 +0200 | [diff] [blame] | 1925 | |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1926 | if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { |
| 1927 | switch (semaphore_passed(ring)) { |
| 1928 | default: |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 1929 | return HANGCHECK_HUNG; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1930 | case 1: |
| 1931 | DRM_ERROR("Kicking stuck semaphore on %s\n", |
| 1932 | ring->name); |
| 1933 | I915_WRITE_CTL(ring, tmp); |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 1934 | return HANGCHECK_KICK; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1935 | case 0: |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 1936 | return HANGCHECK_WAIT; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1937 | } |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1938 | } |
Mika Kuoppala | ed5cbb0 | 2013-05-13 16:32:11 +0300 | [diff] [blame] | 1939 | |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 1940 | return HANGCHECK_HUNG; |
Mika Kuoppala | ed5cbb0 | 2013-05-13 16:32:11 +0300 | [diff] [blame] | 1941 | } |
| 1942 | |
Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1943 | /** |
| 1944 | * This is called when the chip hasn't reported back with completed |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 1945 | * batchbuffers in a long time. We keep track of per-ring seqno progress and |
| 1946 | * if there is no progress, the hangcheck score for that ring is increased. |
| 1947 | * Further, acthd is inspected to see if the ring is stuck. In the stuck case |
| 1948 | * we kick the ring. If we see no progress on three subsequent calls, |
| 1949 | * we assume the chip is wedged and try to fix it by resetting the chip. |
Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1950 | */ |
Damien Lespiau | a658b5d | 2013-08-08 22:28:56 +0100 | [diff] [blame] | 1951 | static void i915_hangcheck_elapsed(unsigned long data) |
Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1952 | { |
| 1953 | struct drm_device *dev = (struct drm_device *)data; |
| 1954 | drm_i915_private_t *dev_priv = dev->dev_private; |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 1955 | struct intel_ring_buffer *ring; |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 1956 | int i; |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 1957 | int busy_count = 0, rings_hung = 0; |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1958 | bool stuck[I915_NUM_RINGS] = { 0 }; |
| 1959 | #define BUSY 1 |
| 1960 | #define KICK 5 |
| 1961 | #define HUNG 20 |
| 1962 | #define FIRE 30 |
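| | /* |
| | * How the weights play out, assuming the historical ~1.5s hangcheck |
| | * period: a ring stuck HUNG on the same seqno gains 20 per pass and |
| | * crosses FIRE (30) on its second pass, i.e. after roughly 3s; a |
| | * repeatedly kicked ring (KICK, +5) needs 7 passes; a ring that is |
| | * merely BUSY (+1, acthd still advancing) needs 31 passes, so only a |
| | * pathologically long single request fires that way. |
| | */ |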
Chris Wilson | 893eead | 2010-10-27 14:44:35 +0100 | [diff] [blame] | 1963 | |
Ben Widawsky | 3e0dc6b | 2011-06-29 10:26:42 -0700 | [diff] [blame] | 1964 | if (!i915_enable_hangcheck) |
| 1965 | return; |
| 1966 | |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 1967 | for_each_ring(ring, dev_priv, i) { |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 1968 | u32 seqno, acthd; |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1969 | bool busy = true; |
Chris Wilson | b451951 | 2012-05-11 14:29:30 +0100 | [diff] [blame] | 1970 | |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1971 | semaphore_clear_deadlocks(dev_priv); |
| 1972 | |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 1973 | seqno = ring->get_seqno(ring, false); |
| 1974 | acthd = intel_ring_get_active_head(ring); |
Chris Wilson | d1e61e7 | 2012-04-10 17:00:41 +0100 | [diff] [blame] | 1975 | |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 1976 | if (ring->hangcheck.seqno == seqno) { |
| 1977 | if (ring_idle(ring, seqno)) { |
| 1978 | if (waitqueue_active(&ring->irq_queue)) { |
| 1979 | /* Issue a wake-up to catch stuck h/w. */ |
| 1980 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", |
| 1981 | ring->name); |
| 1982 | wake_up_all(&ring->irq_queue); |
| 1983 | ring->hangcheck.score += HUNG; |
| 1984 | } else |
| 1985 | busy = false; |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 1986 | } else { |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 1987 | /* We always increment the hangcheck score |
| 1988 | * if the ring is busy and still processing |
| 1989 | * the same request, so that no single request |
| 1990 | * can run indefinitely (such as a chain of |
| 1991 | * batches). The only time we do not increment |
| 1992 | * the hangcheck score on this ring is when this |
| 1993 | * ring is in a legitimate wait for another |
| 1994 | * ring. In that case the waiting ring is a |
| 1995 | * victim and we want to be sure we catch the |
| 1996 | * right culprit. Then every time we do kick |
| 1997 | * the ring, add a small increment to the |
| 1998 | * score so that we can catch a batch that is |
| 1999 | * being repeatedly kicked and so responsible |
| 2000 | * for stalling the machine. |
| 2001 | */ |
Mika Kuoppala | ad8beae | 2013-06-12 12:35:32 +0300 | [diff] [blame] | 2002 | ring->hangcheck.action = ring_stuck(ring, |
| 2003 | acthd); |
| 2004 | |
| 2005 | switch (ring->hangcheck.action) { |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 2006 | case HANGCHECK_WAIT: |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 2007 | break; |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 2008 | case HANGCHECK_ACTIVE: |
Jani Nikula | ea04cb3 | 2013-08-11 12:44:02 +0300 | [diff] [blame] | 2009 | ring->hangcheck.score += BUSY; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 2010 | break; |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 2011 | case HANGCHECK_KICK: |
Jani Nikula | ea04cb3 | 2013-08-11 12:44:02 +0300 | [diff] [blame] | 2012 | ring->hangcheck.score += KICK; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 2013 | break; |
Jani Nikula | f2f4d82 | 2013-08-11 12:44:01 +0300 | [diff] [blame] | 2014 | case HANGCHECK_HUNG: |
Jani Nikula | ea04cb3 | 2013-08-11 12:44:02 +0300 | [diff] [blame] | 2015 | ring->hangcheck.score += HUNG; |
Chris Wilson | 6274f21 | 2013-06-10 11:20:21 +0100 | [diff] [blame] | 2016 | stuck[i] = true; |
| 2017 | break; |
| 2018 | } |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 2019 | } |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 2020 | } else { |
| 2021 | /* Gradually reduce the count so that we catch DoS |
| 2022 | * attempts across multiple batches. |
| 2023 | */ |
| 2024 | if (ring->hangcheck.score > 0) |
| 2025 | ring->hangcheck.score--; |
Chris Wilson | d1e61e7 | 2012-04-10 17:00:41 +0100 | [diff] [blame] | 2026 | } |
| 2027 | |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 2028 | ring->hangcheck.seqno = seqno; |
| 2029 | ring->hangcheck.acthd = acthd; |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 2030 | busy_count += busy; |
Chris Wilson | 893eead | 2010-10-27 14:44:35 +0100 | [diff] [blame] | 2031 | } |
Eric Anholt | b9201c1 | 2010-01-08 14:25:16 -0800 | [diff] [blame] | 2032 | |
Mika Kuoppala | 92cab73 | 2013-05-24 17:16:07 +0300 | [diff] [blame] | 2033 | for_each_ring(ring, dev_priv, i) { |
Chris Wilson | 9107e9d | 2013-06-10 11:20:20 +0100 | [diff] [blame] | 2034 | if (ring->hangcheck.score > FIRE) { |
Daniel Vetter | b8d88d1 | 2013-08-28 10:57:59 +0200 | [diff] [blame] | 2035 | DRM_INFO("%s on %s\n", |
| 2036 | stuck[i] ? "stuck" : "no progress", |
| 2037 | ring->name); |
Chris Wilson | a43adf0 | 2013-06-10 11:20:22 +0100 | [diff] [blame] | 2038 | rings_hung++; |
Mika Kuoppala | 92cab73 | 2013-05-24 17:16:07 +0300 | [diff] [blame] | 2039 | } |
| 2040 | } |
| 2041 | |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 2042 | if (rings_hung) |
| 2043 | return i915_handle_error(dev, true); |
Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 2044 | |
Mika Kuoppala | 05407ff | 2013-05-30 09:04:29 +0300 | [diff] [blame] | 2045 | if (busy_count) |
| 2046 | /* Reset the timer in case the chip hangs without another |
| 2047 | * request being added */ |
Mika Kuoppala | 10cd45b | 2013-07-03 17:22:08 +0300 | [diff] [blame] | 2048 | i915_queue_hangcheck(dev); |
| 2049 | } |
| 2050 | |
| 2051 | void i915_queue_hangcheck(struct drm_device *dev) |
| 2052 | { |
| 2053 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2054 | if (!i915_enable_hangcheck) |
| 2055 | return; |
| 2056 | |
| 2057 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
| 2058 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 2059 | } |
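| | /* |
| | * The pairing above is a standard self-rearming timer: the hangcheck |
| | * callback re-queues itself only while rings are busy, so an idle GPU |
| | * generates no periodic wakeups. In miniature (hypothetical predicate |
| | * and period, sketch only): |
| | */ |
| | #if 0   /* sketch */ |
| | static struct timer_list check_timer; |
| |  |
| | static void check_cb(unsigned long data) |
| | { |
| |         if (work_is_outstanding())      /* hypothetical predicate */ |
| |                 mod_timer(&check_timer, |
| |                           round_jiffies_up(jiffies + |
| |                                            msecs_to_jiffies(1500))); |
| | } |
| | #endif |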
| 2060 | |
Paulo Zanoni | 91738a9 | 2013-06-05 14:21:51 -0300 | [diff] [blame] | 2061 | static void ibx_irq_preinstall(struct drm_device *dev) |
| 2062 | { |
| 2063 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2064 | |
| 2065 | if (HAS_PCH_NOP(dev)) |
| 2066 | return; |
| 2067 | |
| 2068 | /* south display irq */ |
| 2069 | I915_WRITE(SDEIMR, 0xffffffff); |
| 2070 | /* |
| 2071 | * SDEIER is also touched by the interrupt handler to work around missed |
| 2072 | * PCH interrupts. Hence we can't update it after the interrupt handler |
| 2073 | * is enabled - instead we unconditionally enable all PCH interrupt |
| 2074 | * sources here, but then only unmask them as needed with SDEIMR. |
| 2075 | */ |
| 2076 | I915_WRITE(SDEIER, 0xffffffff); |
| 2077 | POSTING_READ(SDEIER); |
| 2078 | } |
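| | /* |
| | * The split above reflects the usual i915 interrupt-register triple: |
| | * IER gates which sources can interrupt at all, IMR masks which of |
| | * those reach the CPU, and IIR latches pending sources (write 1 to |
| | * clear). With SDEIER left fully enabled, enabling a PCH source later |
| | * is just an IMR update, e.g. (hypothetical helper, mirroring |
| | * ibx_display_interrupt_update()): |
| | */ |
| | #if 0   /* sketch */ |
| | static void sde_unmask(struct drm_i915_private *dev_priv, u32 bits) |
| | { |
| |         u32 imr = I915_READ(SDEIMR); |
| |  |
| |         imr &= ~bits;           /* 0 in IMR == unmasked */ |
| |         I915_WRITE(SDEIMR, imr); |
| |         POSTING_READ(SDEIMR); |
| | } |
| | #endif |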
| 2079 | |
Daniel Vetter | d18ea1b | 2013-07-12 22:43:25 +0200 | [diff] [blame] | 2080 | static void gen5_gt_irq_preinstall(struct drm_device *dev) |
| 2081 | { |
| 2082 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2083 | |
| 2084 | /* and GT */ |
| 2085 | I915_WRITE(GTIMR, 0xffffffff); |
| 2086 | I915_WRITE(GTIER, 0x0); |
| 2087 | POSTING_READ(GTIER); |
| 2088 | |
| 2089 | if (INTEL_INFO(dev)->gen >= 6) { |
| 2090 | /* and PM */ |
| 2091 | I915_WRITE(GEN6_PMIMR, 0xffffffff); |
| 2092 | I915_WRITE(GEN6_PMIER, 0x0); |
| 2093 | POSTING_READ(GEN6_PMIER); |
| 2094 | } |
| 2095 | } |
| 2096 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2097 | /* drm_dma.h hooks |
| 2098 | */ |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 2099 | static void ironlake_irq_preinstall(struct drm_device *dev) |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2100 | { |
| 2101 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2102 | |
Jesse Barnes | 4697995 | 2011-04-07 13:53:55 -0700 | [diff] [blame] | 2103 | atomic_set(&dev_priv->irq_received, 0); |
| 2104 | |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2105 | I915_WRITE(HWSTAM, 0xeffe); |
Daniel Vetter | bdfcdb6 | 2012-01-05 01:05:26 +0100 | [diff] [blame] | 2106 | |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2107 | I915_WRITE(DEIMR, 0xffffffff); |
| 2108 | I915_WRITE(DEIER, 0x0); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 2109 | POSTING_READ(DEIER); |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2110 | |
Daniel Vetter | d18ea1b | 2013-07-12 22:43:25 +0200 | [diff] [blame] | 2111 | gen5_gt_irq_preinstall(dev); |
Zhenyu Wang | c650156 | 2009-11-03 18:57:21 +0000 | [diff] [blame] | 2112 | |
Paulo Zanoni | 91738a9 | 2013-06-05 14:21:51 -0300 | [diff] [blame] | 2113 | ibx_irq_preinstall(dev); |
Ben Widawsky | 7d99163 | 2013-05-28 19:22:25 -0700 | [diff] [blame] | 2114 | } |
| 2115 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2116 | static void valleyview_irq_preinstall(struct drm_device *dev) |
| 2117 | { |
| 2118 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2119 | int pipe; |
| 2120 | |
| 2121 | atomic_set(&dev_priv->irq_received, 0); |
| 2122 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2123 | /* VLV magic */ |
| 2124 | I915_WRITE(VLV_IMR, 0); |
| 2125 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); |
| 2126 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); |
| 2127 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); |
| 2128 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2129 | /* and GT */ |
| 2130 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 2131 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
Daniel Vetter | d18ea1b | 2013-07-12 22:43:25 +0200 | [diff] [blame] | 2132 | |
| 2133 | gen5_gt_irq_preinstall(dev); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2134 | |
| 2135 | I915_WRITE(DPINVGTT, 0xff); |
| 2136 | |
| 2137 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2138 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2139 | for_each_pipe(pipe) |
| 2140 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
| 2141 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2142 | I915_WRITE(VLV_IMR, 0xffffffff); |
| 2143 | I915_WRITE(VLV_IER, 0x0); |
| 2144 | POSTING_READ(VLV_IER); |
| 2145 | } |
| 2146 | |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2147 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
Keith Packard | 7fe0b97 | 2011-09-19 13:31:02 -0700 | [diff] [blame] | 2148 | { |
| 2149 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2150 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 2151 | struct intel_encoder *intel_encoder; |
Daniel Vetter | fee884e | 2013-07-04 23:35:21 +0200 | [diff] [blame] | 2152 | u32 hotplug_irqs, hotplug, enabled_irqs = 0; |
Keith Packard | 7fe0b97 | 2011-09-19 13:31:02 -0700 | [diff] [blame] | 2153 | |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2154 | if (HAS_PCH_IBX(dev)) { |
Daniel Vetter | fee884e | 2013-07-04 23:35:21 +0200 | [diff] [blame] | 2155 | hotplug_irqs = SDE_HOTPLUG_MASK; |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2156 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
Egbert Eich | cd569ae | 2013-04-16 13:36:57 +0200 | [diff] [blame] | 2157 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
Daniel Vetter | fee884e | 2013-07-04 23:35:21 +0200 | [diff] [blame] | 2158 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2159 | } else { |
Daniel Vetter | fee884e | 2013-07-04 23:35:21 +0200 | [diff] [blame] | 2160 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2161 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
Egbert Eich | cd569ae | 2013-04-16 13:36:57 +0200 | [diff] [blame] | 2162 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
Daniel Vetter | fee884e | 2013-07-04 23:35:21 +0200 | [diff] [blame] | 2163 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2164 | } |
| 2165 | |
Daniel Vetter | fee884e | 2013-07-04 23:35:21 +0200 | [diff] [blame] | 2166 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2167 | |
| 2168 | /* |
| 2169 | * Enable digital hotplug on the PCH, and configure the DP short pulse |
| 2170 | * duration to 2ms (which is the minimum in the Display Port spec) |
| 2171 | * |
| 2172 | * This register is the same on all known PCH chips. |
| 2173 | */ |
Keith Packard | 7fe0b97 | 2011-09-19 13:31:02 -0700 | [diff] [blame] | 2174 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
| 2175 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); |
| 2176 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; |
| 2177 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; |
| 2178 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; |
| 2179 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
| 2180 | } |
| 2181 | |
Paulo Zanoni | d46da43 | 2013-02-08 17:35:15 -0200 | [diff] [blame] | 2182 | static void ibx_irq_postinstall(struct drm_device *dev) |
| 2183 | { |
| 2184 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 2185 | u32 mask; |
Paulo Zanoni | d46da43 | 2013-02-08 17:35:15 -0200 | [diff] [blame] | 2186 | |
Daniel Vetter | 692a04c | 2013-05-29 21:43:05 +0200 | [diff] [blame] | 2187 | if (HAS_PCH_NOP(dev)) |
| 2188 | return; |
| 2189 | |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 2190 | if (HAS_PCH_IBX(dev)) { |
| 2191 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | |
Paulo Zanoni | de032bf | 2013-04-12 17:57:58 -0300 | [diff] [blame] | 2192 | SDE_TRANSA_FIFO_UNDER | SDE_POISON; |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 2193 | } else { |
| 2194 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; |
| 2195 | |
| 2196 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); |
| 2197 | } |
Ben Widawsky | ab5c608 | 2013-04-05 13:12:41 -0700 | [diff] [blame] | 2198 | |
Paulo Zanoni | d46da43 | 2013-02-08 17:35:15 -0200 | [diff] [blame] | 2199 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
| 2200 | I915_WRITE(SDEIMR, ~mask); |
Paulo Zanoni | d46da43 | 2013-02-08 17:35:15 -0200 | [diff] [blame] | 2201 | } |
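| | /* |
| | * I915_WRITE(reg, I915_READ(reg)) on an IIR-type register is the |
| | * write-1-to-clear idiom used throughout this file: the read returns |
| | * exactly the pending bits, and writing them back acknowledges those |
| | * and only those, leaving anything that asserts in between latched. |
| | */ |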
| 2202 | |
Daniel Vetter | 0a9a8c9 | 2013-07-12 22:43:26 +0200 | [diff] [blame] | 2203 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
| 2204 | { |
| 2205 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2206 | u32 pm_irqs, gt_irqs; |
| 2207 | |
| 2208 | pm_irqs = gt_irqs = 0; |
| 2209 | |
| 2210 | dev_priv->gt_irq_mask = ~0; |
| 2211 | if (HAS_L3_GPU_CACHE(dev)) { |
| 2212 | /* L3 parity interrupt is always unmasked. */ |
| 2213 | dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; |
| 2214 | gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; |
| 2215 | } |
| 2216 | |
| 2217 | gt_irqs |= GT_RENDER_USER_INTERRUPT; |
| 2218 | if (IS_GEN5(dev)) { |
| 2219 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | |
| 2220 | ILK_BSD_USER_INTERRUPT; |
| 2221 | } else { |
| 2222 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; |
| 2223 | } |
| 2224 | |
| 2225 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 2226 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
| 2227 | I915_WRITE(GTIER, gt_irqs); |
| 2228 | POSTING_READ(GTIER); |
| 2229 | |
| 2230 | if (INTEL_INFO(dev)->gen >= 6) { |
| 2231 | pm_irqs |= GEN6_PM_RPS_EVENTS; |
| 2232 | |
| 2233 | if (HAS_VEBOX(dev)) |
| 2234 | pm_irqs |= PM_VEBOX_USER_INTERRUPT; |
| 2235 | |
Paulo Zanoni | 605cd25 | 2013-08-06 18:57:15 -0300 | [diff] [blame] | 2236 | dev_priv->pm_irq_mask = 0xffffffff; |
Daniel Vetter | 0a9a8c9 | 2013-07-12 22:43:26 +0200 | [diff] [blame] | 2237 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); |
Paulo Zanoni | 605cd25 | 2013-08-06 18:57:15 -0300 | [diff] [blame] | 2238 | I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); |
Daniel Vetter | 0a9a8c9 | 2013-07-12 22:43:26 +0200 | [diff] [blame] | 2239 | I915_WRITE(GEN6_PMIER, pm_irqs); |
| 2240 | POSTING_READ(GEN6_PMIER); |
| 2241 | } |
| 2242 | } |
| 2243 | |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 2244 | static int ironlake_irq_postinstall(struct drm_device *dev) |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2245 | { |
Daniel Vetter | 4bc9d43 | 2013-06-27 13:44:58 +0200 | [diff] [blame] | 2246 | unsigned long irqflags; |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2247 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Paulo Zanoni | 8e76f8d | 2013-07-12 20:01:56 -0300 | [diff] [blame] | 2248 | u32 display_mask, extra_mask; |
| 2249 | |
| 2250 | if (INTEL_INFO(dev)->gen >= 7) { |
| 2251 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | |
| 2252 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | |
| 2253 | DE_PLANEB_FLIP_DONE_IVB | |
| 2254 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | |
| 2255 | DE_ERR_INT_IVB); |
| 2256 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | |
| 2257 | DE_PIPEA_VBLANK_IVB); |
| 2258 | |
| 2259 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); |
| 2260 | } else { |
| 2261 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
| 2262 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
| 2263 | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | |
| 2264 | DE_PIPEA_FIFO_UNDERRUN | DE_POISON); |
| 2265 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; |
| 2266 | } |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2267 | |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 2268 | dev_priv->irq_mask = ~display_mask; |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2269 | |
| 2270 | /* should always be able to generate an irq */ |
| 2271 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 2272 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
Paulo Zanoni | 8e76f8d | 2013-07-12 20:01:56 -0300 | [diff] [blame] | 2273 | I915_WRITE(DEIER, display_mask | extra_mask); |
Chris Wilson | 3143a2b | 2010-11-16 15:55:10 +0000 | [diff] [blame] | 2274 | POSTING_READ(DEIER); |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2275 | |
Daniel Vetter | 0a9a8c9 | 2013-07-12 22:43:26 +0200 | [diff] [blame] | 2276 | gen5_gt_irq_postinstall(dev); |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2277 | |
Paulo Zanoni | d46da43 | 2013-02-08 17:35:15 -0200 | [diff] [blame] | 2278 | ibx_irq_postinstall(dev); |
Keith Packard | 7fe0b97 | 2011-09-19 13:31:02 -0700 | [diff] [blame] | 2279 | |
Jesse Barnes | f97108d | 2010-01-29 11:27:07 -0800 | [diff] [blame] | 2280 | if (IS_IRONLAKE_M(dev)) { |
Daniel Vetter | 6005ce4 | 2013-06-27 13:44:59 +0200 | [diff] [blame] | 2281 | /* Enable PCU event interrupts |
| 2282 | * |
| 2283 | * spinlocking not required here for correctness since interrupt |
Daniel Vetter | 4bc9d43 | 2013-06-27 13:44:58 +0200 | [diff] [blame] | 2284 | * setup is guaranteed to run in single-threaded context. But we |
| 2285 | * need it to make the assert_spin_locked happy. */ |
| 2286 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Jesse Barnes | f97108d | 2010-01-29 11:27:07 -0800 | [diff] [blame] | 2287 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
Daniel Vetter | 4bc9d43 | 2013-06-27 13:44:58 +0200 | [diff] [blame] | 2288 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
Jesse Barnes | f97108d | 2010-01-29 11:27:07 -0800 | [diff] [blame] | 2289 | } |
| 2290 | |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2291 | return 0; |
| 2292 | } |
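| | /* |
| | * The lock dance above exists only to satisfy locking assertions: |
| | * helpers that touch DEIMR assert their caller holds irq_lock. A |
| | * sketch of the assumed shape of such a helper: |
| | */ |
| | #if 0   /* sketch */ |
| | static void example_enable_display_irq(struct drm_i915_private *dev_priv, |
| |                                        u32 bit) |
| | { |
| |         assert_spin_locked(&dev_priv->irq_lock);  /* WARNs if unheld */ |
| |         dev_priv->irq_mask &= ~bit; |
| |         I915_WRITE(DEIMR, dev_priv->irq_mask); |
| | } |
| | #endif |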
| 2293 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2294 | static int valleyview_irq_postinstall(struct drm_device *dev) |
| 2295 | { |
| 2296 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2297 | u32 enable_mask; |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 2298 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
Daniel Vetter | b79480b | 2013-06-27 17:52:10 +0200 | [diff] [blame] | 2299 | unsigned long irqflags; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2300 | |
| 2301 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 2302 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2303 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | |
| 2304 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2305 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
| 2306 | |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 2307 | /* |
| 2308 | * Leave vblank interrupts masked initially; enable/disable will |
| 2309 | * toggle them based on usage. |
| 2310 | */ |
| 2311 | dev_priv->irq_mask = (~enable_mask) | |
| 2312 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | |
| 2313 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2314 | |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2315 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2316 | POSTING_READ(PORT_HOTPLUG_EN); |
| 2317 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2318 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
| 2319 | I915_WRITE(VLV_IER, enable_mask); |
| 2320 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2321 | I915_WRITE(PIPESTAT(0), 0xffff); |
| 2322 | I915_WRITE(PIPESTAT(1), 0xffff); |
| 2323 | POSTING_READ(VLV_IER); |
| 2324 | |
Daniel Vetter | b79480b | 2013-06-27 17:52:10 +0200 | [diff] [blame] | 2325 | /* Interrupt setup is already guaranteed to be single-threaded; this is |
| 2326 | * just to make the assert_spin_locked check happy. */ |
| 2327 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 2328 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 2329 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 2330 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); |
Daniel Vetter | b79480b | 2013-06-27 17:52:10 +0200 | [diff] [blame] | 2331 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
Jesse Barnes | 31acc7f | 2012-06-20 10:53:11 -0700 | [diff] [blame] | 2332 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2333 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2334 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2335 | |
Daniel Vetter | 0a9a8c9 | 2013-07-12 22:43:26 +0200 | [diff] [blame] | 2336 | gen5_gt_irq_postinstall(dev); |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2337 | |
| 2338 | /* ack & enable invalid PTE error interrupts */ |
| 2339 | #if 0 /* FIXME: add support to irq handler for checking these bits */ |
| 2340 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); |
| 2341 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); |
| 2342 | #endif |
| 2343 | |
| 2344 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2345 | |
| 2346 | return 0; |
| 2347 | } |
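| | /* |
| | * Design note: the DRM core reference-counts vblank interrupts, and |
| | * the driver's enable_vblank/disable_vblank hooks toggle exactly the |
| | * pipe vblank bits masked out above, so vblank IRQs only run while |
| | * someone is actually waiting on them. |
| | */ |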
| 2348 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2349 | static void valleyview_irq_uninstall(struct drm_device *dev) |
| 2350 | { |
| 2351 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2352 | int pipe; |
| 2353 | |
| 2354 | if (!dev_priv) |
| 2355 | return; |
| 2356 | |
Egbert Eich | ac4c16c | 2013-04-16 13:36:58 +0200 | [diff] [blame] | 2357 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
| 2358 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 2359 | for_each_pipe(pipe) |
| 2360 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
| 2361 | |
| 2362 | I915_WRITE(HWSTAM, 0xffffffff); |
| 2363 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2364 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2365 | for_each_pipe(pipe) |
| 2366 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
| 2367 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2368 | I915_WRITE(VLV_IMR, 0xffffffff); |
| 2369 | I915_WRITE(VLV_IER, 0x0); |
| 2370 | POSTING_READ(VLV_IER); |
| 2371 | } |
| 2372 | |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 2373 | static void ironlake_irq_uninstall(struct drm_device *dev) |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2374 | { |
| 2375 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Jesse Barnes | 4697995 | 2011-04-07 13:53:55 -0700 | [diff] [blame] | 2376 | |
| 2377 | if (!dev_priv) |
| 2378 | return; |
| 2379 | |
Egbert Eich | ac4c16c | 2013-04-16 13:36:58 +0200 | [diff] [blame] | 2380 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
| 2381 | |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2382 | I915_WRITE(HWSTAM, 0xffffffff); |
| 2383 | |
| 2384 | I915_WRITE(DEIMR, 0xffffffff); |
| 2385 | I915_WRITE(DEIER, 0x0); |
| 2386 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 2387 | if (IS_GEN7(dev)) |
| 2388 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2389 | |
| 2390 | I915_WRITE(GTIMR, 0xffffffff); |
| 2391 | I915_WRITE(GTIER, 0x0); |
| 2392 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
Keith Packard | 192aac1f | 2011-09-20 10:12:44 -0700 | [diff] [blame] | 2393 | |
Ben Widawsky | ab5c608 | 2013-04-05 13:12:41 -0700 | [diff] [blame] | 2394 | if (HAS_PCH_NOP(dev)) |
| 2395 | return; |
| 2396 | |
Keith Packard | 192aac1f | 2011-09-20 10:12:44 -0700 | [diff] [blame] | 2397 | I915_WRITE(SDEIMR, 0xffffffff); |
| 2398 | I915_WRITE(SDEIER, 0x0); |
| 2399 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
Paulo Zanoni | 8664281 | 2013-04-12 17:57:57 -0300 | [diff] [blame] | 2400 | if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) |
| 2401 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); |
Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2402 | } |
| 2403 | |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2404 | static void i8xx_irq_preinstall(struct drm_device * dev) |
| 2405 | { |
| 2406 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2407 | int pipe; |
| 2408 | |
| 2409 | atomic_set(&dev_priv->irq_received, 0); |
| 2410 | |
| 2411 | for_each_pipe(pipe) |
| 2412 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2413 | I915_WRITE16(IMR, 0xffff); |
| 2414 | I915_WRITE16(IER, 0x0); |
| 2415 | POSTING_READ16(IER); |
| 2416 | } |
| 2417 | |
| 2418 | static int i8xx_irq_postinstall(struct drm_device *dev) |
| 2419 | { |
| 2420 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2421 | |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2422 | I915_WRITE16(EMR, |
| 2423 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
| 2424 | |
| 2425 | /* Unmask the interrupts that we always want on. */ |
| 2426 | dev_priv->irq_mask = |
| 2427 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2428 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2429 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2430 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
| 2431 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 2432 | I915_WRITE16(IMR, dev_priv->irq_mask); |
| 2433 | |
| 2434 | I915_WRITE16(IER, |
| 2435 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2436 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2437 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | |
| 2438 | I915_USER_INTERRUPT); |
| 2439 | POSTING_READ16(IER); |
| 2440 | |
| 2441 | return 0; |
| 2442 | } |
| 2443 | |
Ville Syrjälä | 90a72f8 | 2013-02-19 23:16:44 +0200 | [diff] [blame] | 2444 | /* |
| 2445 | * Returns true when a page flip has completed. |
| 2446 | */ |
| 2447 | static bool i8xx_handle_vblank(struct drm_device *dev, |
| 2448 | int pipe, u16 iir) |
| 2449 | { |
| 2450 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2451 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); |
| 2452 | |
| 2453 | if (!drm_handle_vblank(dev, pipe)) |
| 2454 | return false; |
| 2455 | |
| 2456 | if ((iir & flip_pending) == 0) |
| 2457 | return false; |
| 2458 | |
| 2459 | intel_prepare_page_flip(dev, pipe); |
| 2460 | |
| 2461 | /* We detect FlipDone by looking for the change in PendingFlip from '1' |
| 2462 | * to '0' on the following vblank, i.e. IIR has the PendingFlip |
| 2463 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence |
| 2464 | * the flip is completed (no longer pending). Since this doesn't raise |
| 2465 | * an interrupt per se, we watch for the change at vblank. |
| 2466 | */ |
| 2467 | if (I915_READ16(ISR) & flip_pending) |
| 2468 | return false; |
| 2469 | |
| 2470 | intel_finish_page_flip(dev, pipe); |
| 2471 | |
| 2472 | return true; |
| 2473 | } |
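| | /* |
| | * ISR vs IIR, as used above: ISR mirrors the live state of each |
| | * source while IIR latches it until acknowledged. Once the flip |
| | * completes, PendingFlip drops in ISR even though the latched IIR |
| | * bit may still be set - so sampling ISR at vblank is what proves |
| | * the flip has really finished. |
| | */ |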
| 2474 | |
Daniel Vetter | ff1f525 | 2012-10-02 15:10:55 +0200 | [diff] [blame] | 2475 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2476 | { |
| 2477 | struct drm_device *dev = (struct drm_device *) arg; |
| 2478 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2479 | u16 iir, new_iir; |
| 2480 | u32 pipe_stats[2]; |
| 2481 | unsigned long irqflags; |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2482 | int pipe; |
| 2483 | u16 flip_mask = |
| 2484 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2485 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
| 2486 | |
| 2487 | atomic_inc(&dev_priv->irq_received); |
| 2488 | |
| 2489 | iir = I915_READ16(IIR); |
| 2490 | if (iir == 0) |
| 2491 | return IRQ_NONE; |
| 2492 | |
| 2493 | while (iir & ~flip_mask) { |
| 2494 | /* Can't rely on pipestat interrupt bit in iir as it might |
| 2495 | * have been cleared after the pipestat interrupt was received. |
| 2496 | * It doesn't set the bit in iir again, but it still produces |
| 2497 | * interrupts (for non-MSI). |
| 2498 | */ |
| 2499 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 2500 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
| 2501 | i915_handle_error(dev, false); |
| 2502 | |
| 2503 | for_each_pipe(pipe) { |
| 2504 | int reg = PIPESTAT(pipe); |
| 2505 | pipe_stats[pipe] = I915_READ(reg); |
| 2506 | |
| 2507 | /* |
| 2508 | * Clear the PIPE*STAT regs before the IIR |
| 2509 | */ |
| 2510 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 2511 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 2512 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 2513 | pipe_name(pipe)); |
| 2514 | I915_WRITE(reg, pipe_stats[pipe]); |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2515 | } |
| 2516 | } |
| 2517 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 2518 | |
| 2519 | I915_WRITE16(IIR, iir & ~flip_mask); |
| 2520 | new_iir = I915_READ16(IIR); /* Flush posted writes */ |
| 2521 | |
Daniel Vetter | d05c617 | 2012-04-26 23:28:09 +0200 | [diff] [blame] | 2522 | i915_update_dri1_breadcrumb(dev); |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2523 | |
| 2524 | if (iir & I915_USER_INTERRUPT) |
| 2525 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 2526 | |
| 2527 | if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && |
Ville Syrjälä | 90a72f8 | 2013-02-19 23:16:44 +0200 | [diff] [blame] | 2528 | i8xx_handle_vblank(dev, 0, iir)) |
| 2529 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2530 | |
| 2531 | if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && |
Ville Syrjälä | 90a72f8 | 2013-02-19 23:16:44 +0200 | [diff] [blame] | 2532 | i8xx_handle_vblank(dev, 1, iir)) |
| 2533 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2534 | |
| 2535 | iir = new_iir; |
| 2536 | } |
| 2537 | |
| 2538 | return IRQ_HANDLED; |
| 2539 | } |
| 2540 | |
| 2541 | static void i8xx_irq_uninstall(struct drm_device * dev) |
| 2542 | { |
| 2543 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2544 | int pipe; |
| 2545 | |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 2546 | for_each_pipe(pipe) { |
| 2547 | /* Clear enable bits; then clear status bits */ |
| 2548 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2549 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
| 2550 | } |
| 2551 | I915_WRITE16(IMR, 0xffff); |
| 2552 | I915_WRITE16(IER, 0x0); |
| 2553 | I915_WRITE16(IIR, I915_READ16(IIR)); |
| 2554 | } |
| 2555 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2556 | static void i915_irq_preinstall(struct drm_device * dev) |
| 2557 | { |
| 2558 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2559 | int pipe; |
| 2560 | |
| 2561 | atomic_set(&dev_priv->irq_received, 0); |
| 2562 | |
| 2563 | if (I915_HAS_HOTPLUG(dev)) { |
| 2564 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2565 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2566 | } |
| 2567 | |
Chris Wilson | 00d98eb | 2012-04-24 22:59:48 +0100 | [diff] [blame] | 2568 | I915_WRITE16(HWSTAM, 0xeffe); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2569 | for_each_pipe(pipe) |
| 2570 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2571 | I915_WRITE(IMR, 0xffffffff); |
| 2572 | I915_WRITE(IER, 0x0); |
| 2573 | POSTING_READ(IER); |
| 2574 | } |
| 2575 | |
| 2576 | static int i915_irq_postinstall(struct drm_device *dev) |
| 2577 | { |
| 2578 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2579 | u32 enable_mask; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2580 | |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2581 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
| 2582 | |
| 2583 | /* Unmask the interrupts that we always want on. */ |
| 2584 | dev_priv->irq_mask = |
| 2585 | ~(I915_ASLE_INTERRUPT | |
| 2586 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2587 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2588 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2589 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
| 2590 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 2591 | |
| 2592 | enable_mask = |
| 2593 | I915_ASLE_INTERRUPT | |
| 2594 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2595 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2596 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | |
| 2597 | I915_USER_INTERRUPT; |
| 2598 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2599 | if (I915_HAS_HOTPLUG(dev)) { |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2600 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2601 | POSTING_READ(PORT_HOTPLUG_EN); |
| 2602 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2603 | /* Enable in IER... */ |
| 2604 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
| 2605 | /* and unmask in IMR */ |
| 2606 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
| 2607 | } |
| 2608 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2609 | I915_WRITE(IMR, dev_priv->irq_mask); |
| 2610 | I915_WRITE(IER, enable_mask); |
| 2611 | POSTING_READ(IER); |
| 2612 | |
Jani Nikula | f49e38d | 2013-04-29 13:02:54 +0300 | [diff] [blame] | 2613 | i915_enable_asle_pipestat(dev); |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2614 | |
| 2615 | return 0; |
| 2616 | } |
| 2617 | |
Ville Syrjälä | 90a72f8 | 2013-02-19 23:16:44 +0200 | [diff] [blame] | 2618 | /* |
| 2619 | * Returns true when a page flip has completed. |
| 2620 | */ |
| 2621 | static bool i915_handle_vblank(struct drm_device *dev, |
| 2622 | int plane, int pipe, u32 iir) |
| 2623 | { |
| 2624 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2625 | u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); |
| 2626 | |
| 2627 | if (!drm_handle_vblank(dev, pipe)) |
| 2628 | return false; |
| 2629 | |
| 2630 | if ((iir & flip_pending) == 0) |
| 2631 | return false; |
| 2632 | |
| 2633 | intel_prepare_page_flip(dev, plane); |
| 2634 | |
| 2635 | /* We detect FlipDone by looking for the change in PendingFlip from '1' |
| 2636 | * to '0' on the following vblank, i.e. IIR has the PendingFlip |
| 2637 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence |
| 2638 | * the flip is completed (no longer pending). Since this doesn't raise |
| 2639 | * an interrupt per se, we watch for the change at vblank. |
| 2640 | */ |
| 2641 | if (I915_READ(ISR) & flip_pending) |
| 2642 | return false; |
| 2643 | |
| 2644 | intel_finish_page_flip(dev, pipe); |
| 2645 | |
| 2646 | return true; |
| 2647 | } |
| 2648 | |
Daniel Vetter | ff1f525 | 2012-10-02 15:10:55 +0200 | [diff] [blame] | 2649 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2650 | { |
| 2651 | struct drm_device *dev = (struct drm_device *) arg; |
| 2652 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Chris Wilson | 8291ee9 | 2012-04-24 22:59:47 +0100 | [diff] [blame] | 2653 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2654 | unsigned long irqflags; |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2655 | u32 flip_mask = |
| 2656 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2657 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2658 | int pipe, ret = IRQ_NONE; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2659 | |
| 2660 | atomic_inc(&dev_priv->irq_received); |
| 2661 | |
| 2662 | iir = I915_READ(IIR); |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2663 | do { |
| 2664 | bool irq_received = (iir & ~flip_mask) != 0; |
Chris Wilson | 8291ee9 | 2012-04-24 22:59:47 +0100 | [diff] [blame] | 2665 | bool blc_event = false; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2666 | |
| 2667 | /* Can't rely on pipestat interrupt bit in iir as it might |
| 2668 | * have been cleared after the pipestat interrupt was received. |
| 2669 | * It doesn't set the bit in iir again, but it still produces |
| 2670 | * interrupts (for non-MSI). |
| 2671 | */ |
| 2672 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 2673 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
| 2674 | i915_handle_error(dev, false); |
| 2675 | |
| 2676 | for_each_pipe(pipe) { |
| 2677 | int reg = PIPESTAT(pipe); |
| 2678 | pipe_stats[pipe] = I915_READ(reg); |
| 2679 | |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2680 | /* Clear the PIPE*STAT regs before the IIR */ |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2681 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 2682 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 2683 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 2684 | pipe_name(pipe)); |
| 2685 | I915_WRITE(reg, pipe_stats[pipe]); |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2686 | irq_received = true; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2687 | } |
| 2688 | } |
| 2689 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 2690 | |
| 2691 | if (!irq_received) |
| 2692 | break; |
| 2693 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2694 | /* Consume port. Then clear IIR or we'll miss events */ |
| 2695 | if ((I915_HAS_HOTPLUG(dev)) && |
| 2696 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { |
| 2697 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 2698 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2699 | |
| 2700 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
| 2701 | hotplug_status); |
Daniel Vetter | 91d131d | 2013-06-27 17:52:14 +0200 | [diff] [blame] | 2702 | |
| 2703 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); |
| 2704 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2705 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2706 | POSTING_READ(PORT_HOTPLUG_STAT); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2707 | } |
| 2708 | |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2709 | I915_WRITE(IIR, iir & ~flip_mask); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2710 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
| 2711 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2712 | if (iir & I915_USER_INTERRUPT) |
| 2713 | notify_ring(dev, &dev_priv->ring[RCS]); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2714 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2715 | for_each_pipe(pipe) { |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2716 | int plane = pipe; |
| 2717 | if (IS_MOBILE(dev)) |
| 2718 | plane = !plane; |
Ville Syrjälä | 5e2032d | 2013-02-19 15:16:38 +0200 | [diff] [blame] | 2719 | |
Ville Syrjälä | 90a72f8 | 2013-02-19 23:16:44 +0200 | [diff] [blame] | 2720 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
| 2721 | i915_handle_vblank(dev, plane, pipe, iir)) |
| 2722 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2723 | |
| 2724 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
| 2725 | blc_event = true; |
| 2726 | } |
| 2727 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2728 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
| 2729 | intel_opregion_asle_intr(dev); |
| 2730 | |
| 2731 | /* With MSI, interrupts are only generated when iir |
| 2732 | * transitions from zero to nonzero. If another bit got |
| 2733 | * set while we were handling the existing iir bits, then |
| 2734 | * we would never get another interrupt. |
| 2735 | * |
| 2736 | * This is fine on non-MSI as well, as if we hit this path |
| 2737 | * we avoid exiting the interrupt handler only to generate |
| 2738 | * another one. |
| 2739 | * |
| 2740 | * Note that for MSI this could cause a stray interrupt report |
| 2741 | * if an interrupt landed in the time between writing IIR and |
| 2742 | * the posting read. This should be rare enough to never |
| 2743 | * trigger the 99% of 100,000 interrupts test for disabling |
| 2744 | * stray interrupts. |
| 2745 | */ |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2746 | ret = IRQ_HANDLED; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2747 | iir = new_iir; |
Chris Wilson | 38bde18 | 2012-04-24 22:59:50 +0100 | [diff] [blame] | 2748 | } while (iir & ~flip_mask); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2749 | |
Daniel Vetter | d05c617 | 2012-04-26 23:28:09 +0200 | [diff] [blame] | 2750 | i915_update_dri1_breadcrumb(dev); |
Chris Wilson | 8291ee9 | 2012-04-24 22:59:47 +0100 | [diff] [blame] | 2751 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2752 | return ret; |
| 2753 | } |
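| | /* |
| | * Worked through: MSI generates a message only on IIR's 0 -> non-zero |
| | * transition. The loop above therefore acknowledges, re-reads IIR and |
| | * keeps iterating until it observes IIR pass through zero - otherwise |
| | * a bit that set while the previous batch was being handled would |
| | * never produce another message and interrupts would stop dead. |
| | */ |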
| 2754 | |
| 2755 | static void i915_irq_uninstall(struct drm_device * dev) |
| 2756 | { |
| 2757 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2758 | int pipe; |
| 2759 | |
Egbert Eich | ac4c16c | 2013-04-16 13:36:58 +0200 | [diff] [blame] | 2760 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
| 2761 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2762 | if (I915_HAS_HOTPLUG(dev)) { |
| 2763 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2764 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2765 | } |
| 2766 | |
Chris Wilson | 00d98eb | 2012-04-24 22:59:48 +0100 | [diff] [blame] | 2767 | I915_WRITE16(HWSTAM, 0xffff); |
Chris Wilson | 55b3975 | 2012-04-24 22:59:49 +0100 | [diff] [blame] | 2768 | for_each_pipe(pipe) { |
| 2769 | /* Clear enable bits; then clear status bits */ |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2770 | I915_WRITE(PIPESTAT(pipe), 0); |
Chris Wilson | 55b3975 | 2012-04-24 22:59:49 +0100 | [diff] [blame] | 2771 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
| 2772 | } |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2773 | I915_WRITE(IMR, 0xffffffff); |
| 2774 | I915_WRITE(IER, 0x0); |
| 2775 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2776 | I915_WRITE(IIR, I915_READ(IIR)); |
| 2777 | } |
| 2778 | |
| 2779 | static void i965_irq_preinstall(struct drm_device * dev) |
| 2780 | { |
| 2781 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2782 | int pipe; |
| 2783 | |
| 2784 | atomic_set(&dev_priv->irq_received, 0); |
| 2785 | |
Chris Wilson | adca473 | 2012-05-11 18:01:31 +0100 | [diff] [blame] | 2786 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2787 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2788 | |
| 2789 | I915_WRITE(HWSTAM, 0xeffe); |
| 2790 | for_each_pipe(pipe) |
| 2791 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2792 | I915_WRITE(IMR, 0xffffffff); |
| 2793 | I915_WRITE(IER, 0x0); |
| 2794 | POSTING_READ(IER); |
| 2795 | } |
| 2796 | |
| 2797 | static int i965_irq_postinstall(struct drm_device *dev) |
| 2798 | { |
| 2799 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Chris Wilson | bbba0a9 | 2012-04-24 22:59:51 +0100 | [diff] [blame] | 2800 | u32 enable_mask; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2801 | u32 error_mask; |
Daniel Vetter | b79480b | 2013-06-27 17:52:10 +0200 | [diff] [blame] | 2802 | unsigned long irqflags; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2803 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2804 | /* Unmask the interrupts that we always want on. */ |
Chris Wilson | bbba0a9 | 2012-04-24 22:59:51 +0100 | [diff] [blame] | 2805 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | |
Chris Wilson | adca473 | 2012-05-11 18:01:31 +0100 | [diff] [blame] | 2806 | I915_DISPLAY_PORT_INTERRUPT | |
Chris Wilson | bbba0a9 | 2012-04-24 22:59:51 +0100 | [diff] [blame] | 2807 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2808 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2809 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2810 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
| 2811 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 2812 | |
| 2813 | enable_mask = ~dev_priv->irq_mask; |
Ville Syrjälä | 21ad833 | 2013-02-19 15:16:39 +0200 | [diff] [blame] | 2814 | enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2815 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); |
Chris Wilson | bbba0a9 | 2012-04-24 22:59:51 +0100 | [diff] [blame] | 2816 | enable_mask |= I915_USER_INTERRUPT; |
| 2817 | |
| 2818 | if (IS_G4X(dev)) |
| 2819 | enable_mask |= I915_BSD_USER_INTERRUPT; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2820 | |
Daniel Vetter | b79480b | 2013-06-27 17:52:10 +0200 | [diff] [blame] | 2821 | /* Interrupt setup is already guaranteed to be single-threaded; this is |
| 2822 | * just to make the assert_spin_locked check happy. */ |
| 2823 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 2824 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
Daniel Vetter | b79480b | 2013-06-27 17:52:10 +0200 | [diff] [blame] | 2825 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2826 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2827 | /* |
| 2828 | * Enable some error detection, note the instruction error mask |
| 2829 | * bit is reserved, so we leave it masked. |
| 2830 | */ |
| 2831 | if (IS_G4X(dev)) { |
| 2832 | error_mask = ~(GM45_ERROR_PAGE_TABLE | |
| 2833 | GM45_ERROR_MEM_PRIV | |
| 2834 | GM45_ERROR_CP_PRIV | |
| 2835 | I915_ERROR_MEMORY_REFRESH); |
| 2836 | } else { |
| 2837 | error_mask = ~(I915_ERROR_PAGE_TABLE | |
| 2838 | I915_ERROR_MEMORY_REFRESH); |
| 2839 | } |
| 2840 | I915_WRITE(EMR, error_mask); |
| 2841 | |
| 2842 | I915_WRITE(IMR, dev_priv->irq_mask); |
| 2843 | I915_WRITE(IER, enable_mask); |
| 2844 | POSTING_READ(IER); |
| 2845 | |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2846 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2847 | POSTING_READ(PORT_HOTPLUG_EN); |
| 2848 | |
Jani Nikula | f49e38d | 2013-04-29 13:02:54 +0300 | [diff] [blame] | 2849 | i915_enable_asle_pipestat(dev); |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2850 | |
| 2851 | return 0; |
| 2852 | } |
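| | /* |
| | * EMR note (assumed hardware semantics): EMR is an error *mask*, so a |
| | * 1 suppresses that source. Writing ~(wanted errors) therefore leaves |
| | * only page-table and memory-refresh (plus the G4X privilege errors) |
| | * able to latch and raise the error interrupt handled above. |
| | */ |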
| 2853 | |
Egbert Eich | bac56d5 | 2013-02-25 12:06:51 -0500 | [diff] [blame] | 2854 | static void i915_hpd_irq_setup(struct drm_device *dev) |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2855 | { |
| 2856 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Egbert Eich | e5868a3 | 2013-02-28 04:17:12 -0500 | [diff] [blame] | 2857 | struct drm_mode_config *mode_config = &dev->mode_config; |
Egbert Eich | cd569ae | 2013-04-16 13:36:57 +0200 | [diff] [blame] | 2858 | struct intel_encoder *intel_encoder; |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 2859 | u32 hotplug_en; |
| 2860 | |
Daniel Vetter | b5ea2d5 | 2013-06-27 17:52:15 +0200 | [diff] [blame] | 2861 | assert_spin_locked(&dev_priv->irq_lock); |
| 2862 | |
Egbert Eich | bac56d5 | 2013-02-25 12:06:51 -0500 | [diff] [blame] | 2863 | if (I915_HAS_HOTPLUG(dev)) { |
| 2864 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
| 2865 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; |
| 2866 | /* Note HDMI and DP share hotplug bits */ |
Egbert Eich | e5868a3 | 2013-02-28 04:17:12 -0500 | [diff] [blame] | 2867 | /* enable bits are the same for all generations */ |
Egbert Eich | cd569ae | 2013-04-16 13:36:57 +0200 | [diff] [blame] | 2868 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
| 2869 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
| 2870 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; |
Egbert Eich | bac56d5 | 2013-02-25 12:06:51 -0500 | [diff] [blame] | 2871 | /* Programming the CRT detection parameters tends |
| 2872 | to generate a spurious hotplug event about three |
| 2873 | seconds later. So just do it once. |
| 2874 | */ |
| 2875 | if (IS_G4X(dev)) |
| 2876 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; |
Daniel Vetter | 85fc95b | 2013-03-27 15:47:11 +0100 | [diff] [blame] | 2877 | hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; |
Egbert Eich | bac56d5 | 2013-02-25 12:06:51 -0500 | [diff] [blame] | 2878 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2879 | |
Egbert Eich | bac56d5 | 2013-02-25 12:06:51 -0500 | [diff] [blame] | 2880 | /* Ignore TV since it's buggy */ |
| 2881 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
| 2882 | } |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2883 | } |
| 2884 | |
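| | /* |
| | * Roughly, gen4 interrupt handling works as follows: ISR mirrors the |
| | * live state of each source, IMR decides which sources may latch into |
| | * the sticky IIR, and IER gates whether a latched IIR bit raises the |
| | * CPU interrupt at all. IIR bits are cleared by writing 1 back: |
| | * |
| | *	iir = I915_READ(IIR); |
| | *	I915_WRITE(IIR, iir & ~flip_mask);	// write-1-to-clear |
| | *	new_iir = I915_READ(IIR);		// flush; catch re-assertions |
| | * |
| | * which is the ack sequence the handler below performs on each pass. |
| | */ |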
Daniel Vetter | ff1f525 | 2012-10-02 15:10:55 +0200 | [diff] [blame] | 2885 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2886 | { |
| 2887 | struct drm_device *dev = (struct drm_device *) arg; |
| 2888 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2889 | u32 iir, new_iir; |
| 2890 | u32 pipe_stats[I915_MAX_PIPES]; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2891 | unsigned long irqflags; |
| 2892 | int irq_received; |
| 2893 | int ret = IRQ_NONE, pipe; |
Ville Syrjälä | 21ad833 | 2013-02-19 15:16:39 +0200 | [diff] [blame] | 2894 | u32 flip_mask = |
| 2895 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2896 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2897 | |
| 2898 | atomic_inc(&dev_priv->irq_received); |
| 2899 | |
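| | /* dev_priv->irq_received is diagnostic bookkeeping (e.g. for debugfs), |
| | * not part of the interrupt ack protocol itself. */ |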
| 2900 | iir = I915_READ(IIR); |
| 2901 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2902 | for (;;) { |
Chris Wilson | 2c8ba29 | 2012-04-24 22:59:46 +0100 | [diff] [blame] | 2903 | bool blc_event = false; |
| 2904 | |
Ville Syrjälä | 21ad833 | 2013-02-19 15:16:39 +0200 | [diff] [blame] | 2905 | irq_received = (iir & ~flip_mask) != 0; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2906 | |
| 2907 | /* Can't rely on pipestat interrupt bit in iir as it might |
| 2908 | * have been cleared after the pipestat interrupt was received. |
| 2909 | * It doesn't set the bit in iir again, but it still produces |
| 2910 | * interrupts (for non-MSI). |
| 2911 | */ |
| 2912 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 2913 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
| 2914 | i915_handle_error(dev, false); |
| 2915 | |
| 2916 | for_each_pipe(pipe) { |
| 2917 | int reg = PIPESTAT(pipe); |
| 2918 | pipe_stats[pipe] = I915_READ(reg); |
| 2919 | |
| 2920 | /* |
| 2921 | * Clear the PIPE*STAT regs before the IIR |
| 2922 | */ |
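| | /* |
| | * 0x8000ffff covers the write-1-to-clear bits: the sticky status |
| | * bits in 15:0 plus FIFO underrun in bit 31. Bits 30:16 are the |
| | * enable bits, which this write hands back unchanged. |
| | */ |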
| 2923 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 2924 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 2925 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 2926 | pipe_name(pipe)); |
| 2927 | I915_WRITE(reg, pipe_stats[pipe]); |
| 2928 | irq_received = 1; |
| 2929 | } |
| 2930 | } |
| 2931 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 2932 | |
| 2933 | if (!irq_received) |
| 2934 | break; |
| 2935 | |
| 2936 | ret = IRQ_HANDLED; |
| 2937 | |
| 2938 | /* Consume port. Then clear IIR or we'll miss events */ |
Chris Wilson | adca473 | 2012-05-11 18:01:31 +0100 | [diff] [blame] | 2939 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2940 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
Egbert Eich | b543fb0 | 2013-04-16 13:36:54 +0200 | [diff] [blame] | 2941 | u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? |
| 2942 | HOTPLUG_INT_STATUS_G4X : |
Daniel Vetter | 4f7fd70 | 2013-06-24 21:33:28 +0200 | [diff] [blame] | 2943 | HOTPLUG_INT_STATUS_I915); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2944 | |
| 2945 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
| 2946 | hotplug_status); |
Daniel Vetter | 91d131d | 2013-06-27 17:52:14 +0200 | [diff] [blame] | 2947 | |
| 2948 | intel_hpd_irq_handler(dev, hotplug_trigger, |
| 2949 | IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); |
| 2950 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2951 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 2952 | I915_READ(PORT_HOTPLUG_STAT); |
| 2953 | } |
| 2954 | |
Ville Syrjälä | 21ad833 | 2013-02-19 15:16:39 +0200 | [diff] [blame] | 2955 | I915_WRITE(IIR, iir & ~flip_mask); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2956 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
| 2957 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2958 | if (iir & I915_USER_INTERRUPT) |
| 2959 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 2960 | if (iir & I915_BSD_USER_INTERRUPT) |
| 2961 | notify_ring(dev, &dev_priv->ring[VCS]); |
| 2962 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2963 | for_each_pipe(pipe) { |
Chris Wilson | 2c8ba29 | 2012-04-24 22:59:46 +0100 | [diff] [blame] | 2964 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
Ville Syrjälä | 90a72f8 | 2013-02-19 23:16:44 +0200 | [diff] [blame] | 2965 | i915_handle_vblank(dev, pipe, pipe, iir)) |
| 2966 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2967 | |
| 2968 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
| 2969 | blc_event = true; |
| 2970 | } |
| 2971 | |
| 2973 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
| 2974 | intel_opregion_asle_intr(dev); |
| 2975 | |
Daniel Vetter | 515ac2b | 2012-12-01 13:53:44 +0100 | [diff] [blame] | 2976 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
| 2977 | gmbus_irq_handler(dev); |
| 2978 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2979 | /* With MSI, interrupts are only generated when iir |
| 2980 | * transitions from zero to nonzero. If another bit got |
| 2981 | * set while we were handling the existing iir bits, then |
| 2982 | * we would never get another interrupt. |
| 2983 | * |
| 2984 | * This is fine on non-MSI as well, as if we hit this path |
| 2985 | * we avoid exiting the interrupt handler only to generate |
| 2986 | * another one. |
| 2987 | * |
| 2988 | * Note that for MSI this could cause a stray interrupt report |
| 2989 | * if an interrupt landed in the time between writing IIR and |
| 2990 | * the posting read. This should be rare enough to never |
| 2991 | * trigger the 99% of 100,000 interrupts test for disabling |
| 2992 | * stray interrupts. |
| 2993 | */ |
| 2994 | iir = new_iir; |
| 2995 | } |
| 2996 | |
Daniel Vetter | d05c617 | 2012-04-26 23:28:09 +0200 | [diff] [blame] | 2997 | i915_update_dri1_breadcrumb(dev); |
Chris Wilson | 2c8ba29 | 2012-04-24 22:59:46 +0100 | [diff] [blame] | 2998 | |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 2999 | return ret; |
| 3000 | } |
| 3001 | |
| 3002 | static void i965_irq_uninstall(struct drm_device * dev) |
| 3003 | { |
| 3004 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 3005 | int pipe; |
| 3006 | |
| 3007 | if (!dev_priv) |
| 3008 | return; |
| 3009 | |
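| | /* Quiesce the storm-reenable timer: del_timer_sync() also waits for a |
| | * concurrently running callback, so it can't race with the teardown |
| | * writes below. */ |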
Egbert Eich | ac4c16c | 2013-04-16 13:36:58 +0200 | [diff] [blame] | 3010 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
| 3011 | |
Chris Wilson | adca473 | 2012-05-11 18:01:31 +0100 | [diff] [blame] | 3012 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 3013 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 3014 | |
| 3015 | I915_WRITE(HWSTAM, 0xffffffff); |
| 3016 | for_each_pipe(pipe) |
| 3017 | I915_WRITE(PIPESTAT(pipe), 0); |
| 3018 | I915_WRITE(IMR, 0xffffffff); |
| 3019 | I915_WRITE(IER, 0x0); |
| 3020 | |
| 3021 | for_each_pipe(pipe) |
| 3022 | I915_WRITE(PIPESTAT(pipe), |
| 3023 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); |
| 3024 | I915_WRITE(IIR, I915_READ(IIR)); |
| 3025 | } |
| 3026 | |
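| | /* |
| | * Storm recovery: pins that triggered an interrupt storm were marked |
| | * HPD_DISABLED and their connectors moved to polled detection. When |
| | * this timer fires we mark them HPD_ENABLED again, restore each |
| | * connector's preferred polling mode and reprogram the hardware. |
| | */ |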
Egbert Eich | ac4c16c | 2013-04-16 13:36:58 +0200 | [diff] [blame] | 3027 | static void i915_reenable_hotplug_timer_func(unsigned long data) |
| 3028 | { |
| 3029 | drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; |
| 3030 | struct drm_device *dev = dev_priv->dev; |
| 3031 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 3032 | unsigned long irqflags; |
| 3033 | int i; |
| 3034 | |
| 3035 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 3036 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { |
| 3037 | struct drm_connector *connector; |
| 3038 | |
| 3039 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) |
| 3040 | continue; |
| 3041 | |
| 3042 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; |
| 3043 | |
| 3044 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
| 3045 | struct intel_connector *intel_connector = to_intel_connector(connector); |
| 3046 | |
| 3047 | if (intel_connector->encoder->hpd_pin == i) { |
| 3048 | if (connector->polled != intel_connector->polled) |
| 3049 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", |
| 3050 | drm_get_connector_name(connector)); |
| 3051 | connector->polled = intel_connector->polled; |
| 3052 | if (!connector->polled) |
| 3053 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
| 3054 | } |
| 3055 | } |
| 3056 | } |
| 3057 | if (dev_priv->display.hpd_irq_setup) |
| 3058 | dev_priv->display.hpd_irq_setup(dev); |
| 3059 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 3060 | } |
| 3061 | |
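| | /* |
| | * One-time per-device setup: pick the IRQ, vblank and hotplug vtable |
| | * entries for this platform. No interrupt registers are written here; |
| | * that happens in the preinstall/postinstall hooks at IRQ install time. |
| | */ |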
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 3062 | void intel_irq_init(struct drm_device *dev) |
| 3063 | { |
Chris Wilson | 8b2e326 | 2012-04-24 22:59:41 +0100 | [diff] [blame] | 3064 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3065 | |
| 3066 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 3067 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
Daniel Vetter | c6a828d | 2012-08-08 23:35:35 +0200 | [diff] [blame] | 3068 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
Daniel Vetter | a4da4fa | 2012-11-02 19:55:07 +0100 | [diff] [blame] | 3069 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
Chris Wilson | 8b2e326 | 2012-04-24 22:59:41 +0100 | [diff] [blame] | 3070 | |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 3071 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
| 3072 | i915_hangcheck_elapsed, |
Daniel Vetter | 61bac78 | 2012-12-01 21:03:21 +0100 | [diff] [blame] | 3073 | (unsigned long) dev); |
Egbert Eich | ac4c16c | 2013-04-16 13:36:58 +0200 | [diff] [blame] | 3074 | setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, |
| 3075 | (unsigned long) dev_priv); |
Daniel Vetter | 61bac78 | 2012-12-01 21:03:21 +0100 | [diff] [blame] | 3076 | |
Tomas Janousek | 97a19a2 | 2012-12-08 13:48:13 +0100 | [diff] [blame] | 3077 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
Daniel Vetter | 9ee32fea | 2012-12-01 13:53:48 +0100 | [diff] [blame] | 3078 | |
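| | /* |
| | * Pre-G4X hardware only has a 24-bit frame counter, split across the |
| | * high/low PIPEFRAME registers, which i915_get_vblank_counter has to |
| | * splice back together; G4X and later expose a full 32-bit counter |
| | * that gm45_get_vblank_counter can read directly. |
| | */ |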
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 3079 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
| 3080 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
Eugeni Dodonov | 7d4e146 | 2012-05-09 15:37:09 -0300 | [diff] [blame] | 3081 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 3082 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
| 3083 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
| 3084 | } |
| 3085 | |
Keith Packard | c3613de | 2011-08-12 17:05:54 -0700 | [diff] [blame] | 3086 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 3087 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
| 3088 | else |
| 3089 | dev->driver->get_vblank_timestamp = NULL; |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 3090 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
| 3091 | |
Jesse Barnes | 7e231dbe | 2012-03-28 13:39:38 -0700 | [diff] [blame] | 3092 | if (IS_VALLEYVIEW(dev)) { |
| 3093 | dev->driver->irq_handler = valleyview_irq_handler; |
| 3094 | dev->driver->irq_preinstall = valleyview_irq_preinstall; |
| 3095 | dev->driver->irq_postinstall = valleyview_irq_postinstall; |
| 3096 | dev->driver->irq_uninstall = valleyview_irq_uninstall; |
| 3097 | dev->driver->enable_vblank = valleyview_enable_vblank; |
| 3098 | dev->driver->disable_vblank = valleyview_disable_vblank; |
Egbert Eich | fa00abe | 2013-02-25 12:06:48 -0500 | [diff] [blame] | 3099 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 3100 | } else if (HAS_PCH_SPLIT(dev)) { |
| 3101 | dev->driver->irq_handler = ironlake_irq_handler; |
| 3102 | dev->driver->irq_preinstall = ironlake_irq_preinstall; |
| 3103 | dev->driver->irq_postinstall = ironlake_irq_postinstall; |
| 3104 | dev->driver->irq_uninstall = ironlake_irq_uninstall; |
| 3105 | dev->driver->enable_vblank = ironlake_enable_vblank; |
| 3106 | dev->driver->disable_vblank = ironlake_disable_vblank; |
Daniel Vetter | 82a28bc | 2013-03-27 15:55:01 +0100 | [diff] [blame] | 3107 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 3108 | } else { |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 3109 | if (INTEL_INFO(dev)->gen == 2) { |
| 3110 | dev->driver->irq_preinstall = i8xx_irq_preinstall; |
| 3111 | dev->driver->irq_postinstall = i8xx_irq_postinstall; |
| 3112 | dev->driver->irq_handler = i8xx_irq_handler; |
| 3113 | dev->driver->irq_uninstall = i8xx_irq_uninstall; |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 3114 | } else if (INTEL_INFO(dev)->gen == 3) { |
| 3115 | dev->driver->irq_preinstall = i915_irq_preinstall; |
| 3116 | dev->driver->irq_postinstall = i915_irq_postinstall; |
| 3117 | dev->driver->irq_uninstall = i915_irq_uninstall; |
| 3118 | dev->driver->irq_handler = i915_irq_handler; |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 3119 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 3120 | } else { |
Chris Wilson | a266c7d | 2012-04-24 22:59:44 +0100 | [diff] [blame] | 3121 | dev->driver->irq_preinstall = i965_irq_preinstall; |
| 3122 | dev->driver->irq_postinstall = i965_irq_postinstall; |
| 3123 | dev->driver->irq_uninstall = i965_irq_uninstall; |
| 3124 | dev->driver->irq_handler = i965_irq_handler; |
Egbert Eich | bac56d5 | 2013-02-25 12:06:51 -0500 | [diff] [blame] | 3125 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
Chris Wilson | c2798b1 | 2012-04-22 21:13:57 +0100 | [diff] [blame] | 3126 | } |
Jesse Barnes | f71d4af | 2011-06-28 13:00:41 -0700 | [diff] [blame] | 3127 | dev->driver->enable_vblank = i915_enable_vblank; |
| 3128 | dev->driver->disable_vblank = i915_disable_vblank; |
| 3129 | } |
| 3130 | } |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 3131 | |
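| | /* |
| | * intel_hpd_init - (re)initialize hotplug support. Resets the per-pin |
| | * storm statistics to HPD_ENABLED, restores each connector's preferred |
| | * polling mode, and reprograms the hotplug hardware via the platform |
| | * hpd_irq_setup hook under irq_lock. It can safely be rerun, e.g. at |
| | * resume time. |
| | */ |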
| 3132 | void intel_hpd_init(struct drm_device *dev) |
| 3133 | { |
| 3134 | struct drm_i915_private *dev_priv = dev->dev_private; |
Egbert Eich | 821450c | 2013-04-16 13:36:55 +0200 | [diff] [blame] | 3135 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 3136 | struct drm_connector *connector; |
Daniel Vetter | b5ea2d5 | 2013-06-27 17:52:15 +0200 | [diff] [blame] | 3137 | unsigned long irqflags; |
Egbert Eich | 821450c | 2013-04-16 13:36:55 +0200 | [diff] [blame] | 3138 | int i; |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 3139 | |
Egbert Eich | 821450c | 2013-04-16 13:36:55 +0200 | [diff] [blame] | 3140 | for (i = 1; i < HPD_NUM_PINS; i++) { |
| 3141 | dev_priv->hpd_stats[i].hpd_cnt = 0; |
| 3142 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; |
| 3143 | } |
| 3144 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
| 3145 | struct intel_connector *intel_connector = to_intel_connector(connector); |
| 3146 | connector->polled = intel_connector->polled; |
| 3147 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) |
| 3148 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
| 3149 | } |
Daniel Vetter | b5ea2d5 | 2013-06-27 17:52:15 +0200 | [diff] [blame] | 3150 | |
| 3151 | /* Interrupt setup is already guaranteed to be single-threaded; this is |
| 3152 | * just to make the assert_spin_locked checks happy. */ |
| 3153 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 3154 | if (dev_priv->display.hpd_irq_setup) |
| 3155 | dev_priv->display.hpd_irq_setup(dev); |
Daniel Vetter | b5ea2d5 | 2013-06-27 17:52:15 +0200 | [diff] [blame] | 3156 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
Daniel Vetter | 20afbda | 2012-12-11 14:05:07 +0100 | [diff] [blame] | 3157 | } |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 3158 | |
| 3159 | /* |
| | * Disable interrupts so we can allow Package C8+: save the current |
| | * DE/PCH/GT/PM interrupt masks, then mask every source except the PCH |
| | * event and hotplug bits we still need while in PC8. The saved values |
| | * are written back by hsw_pc8_restore_interrupts(). |
| | */ |
| 3160 | void hsw_pc8_disable_interrupts(struct drm_device *dev) |
| 3161 | { |
| 3162 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3163 | unsigned long irqflags; |
| 3164 | |
| 3165 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 3166 | |
| 3167 | dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); |
| 3168 | dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); |
| 3169 | dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); |
| 3170 | dev_priv->pc8.regsave.gtier = I915_READ(GTIER); |
| 3171 | dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); |
| 3172 | |
| 3173 | ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); |
| 3174 | ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); |
| 3175 | ilk_disable_gt_irq(dev_priv, 0xffffffff); |
| 3176 | snb_disable_pm_irq(dev_priv, 0xffffffff); |
| 3177 | |
| 3178 | dev_priv->pc8.irqs_disabled = true; |
| 3179 | |
| 3180 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 3181 | } |
| 3182 | |
| 3183 | /* |
| | * Restore interrupts so we can recover from Package C8+. The WARNs |
| | * verify that nothing touched the interrupt mask registers while they |
| | * were supposed to stay in their PC8 configuration, then the masks |
| | * saved at disable time are restored. |
| | */ |
| 3184 | void hsw_pc8_restore_interrupts(struct drm_device *dev) |
| 3185 | { |
| 3186 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3187 | unsigned long irqflags; |
| 3188 | uint32_t val, expected; |
| 3189 | |
| 3190 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 3191 | |
| 3192 | val = I915_READ(DEIMR); |
| 3193 | expected = ~DE_PCH_EVENT_IVB; |
| 3194 | WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); |
| 3195 | |
| 3196 | val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; |
| 3197 | expected = ~SDE_HOTPLUG_MASK_CPT; |
| 3198 | WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", |
| 3199 | val, expected); |
| 3200 | |
| 3201 | val = I915_READ(GTIMR); |
| 3202 | expected = 0xffffffff; |
| 3203 | WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); |
| 3204 | |
| 3205 | val = I915_READ(GEN6_PMIMR); |
| 3206 | expected = 0xffffffff; |
| 3207 | WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, |
| 3208 | expected); |
| 3209 | |
| 3210 | dev_priv->pc8.irqs_disabled = false; |
| 3211 | |
| 3212 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); |
| 3213 | ibx_enable_display_interrupt(dev_priv, |
| 3214 | ~dev_priv->pc8.regsave.sdeimr & |
| 3215 | ~SDE_HOTPLUG_MASK_CPT); |
| 3216 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); |
| 3217 | snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); |
| 3218 | I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); |
| 3219 | |
| 3220 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 3221 | } |