Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. |
| 3 | * Copyright 2008 Red Hat Inc. |
| 4 | * Copyright 2009 Jerome Glisse. |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 7 | * copy of this software and associated documentation files (the "Software"), |
| 8 | * to deal in the Software without restriction, including without limitation |
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 10 | * and/or sell copies of the Software, and to permit persons to whom the |
| 11 | * Software is furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in |
| 14 | * all copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 22 | * OTHER DEALINGS IN THE SOFTWARE. |
| 23 | * |
| 24 | * Authors: Dave Airlie |
| 25 | * Alex Deucher |
| 26 | * Jerome Glisse |
| 27 | */ |
| 28 | #include "drmP.h" |
Dave Airlie | eb1f8e4 | 2010-05-07 06:42:51 +0000 | [diff] [blame] | 29 | #include "drm_crtc_helper.h" |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 30 | #include "radeon_drm.h" |
| 31 | #include "radeon_reg.h" |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 32 | #include "radeon.h" |
| 33 | #include "atom.h" |
| 34 | |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 35 | #define RADEON_WAIT_IDLE_TIMEOUT 200 |
| 36 | |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 37 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) |
| 38 | { |
| 39 | struct drm_device *dev = (struct drm_device *) arg; |
| 40 | struct radeon_device *rdev = dev->dev_private; |
| 41 | |
| 42 | return radeon_irq_process(rdev); |
| 43 | } |
| 44 | |
Alex Deucher | d4877cf | 2009-12-04 16:56:37 -0500 | [diff] [blame] | 45 | /* |
| 46 | * Handle hotplug events outside the interrupt handler proper. |
| 47 | */ |
| 48 | static void radeon_hotplug_work_func(struct work_struct *work) |
| 49 | { |
| 50 | struct radeon_device *rdev = container_of(work, struct radeon_device, |
| 51 | hotplug_work); |
| 52 | struct drm_device *dev = rdev->ddev; |
| 53 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 54 | struct drm_connector *connector; |
| 55 | |
| 56 | if (mode_config->num_connector) { |
| 57 | list_for_each_entry(connector, &mode_config->connector_list, head) |
| 58 | radeon_connector_hotplug(connector); |
| 59 | } |
| 60 | /* Just fire off a uevent and let userspace tell us what to do */ |
Dave Airlie | eb1f8e4 | 2010-05-07 06:42:51 +0000 | [diff] [blame] | 61 | drm_helper_hpd_irq_event(dev); |
Alex Deucher | d4877cf | 2009-12-04 16:56:37 -0500 | [diff] [blame] | 62 | } |
| 63 | |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 64 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev) |
| 65 | { |
| 66 | struct radeon_device *rdev = dev->dev_private; |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 67 | unsigned long irqflags; |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 68 | unsigned i; |
| 69 | |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 70 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 71 | /* Disable *all* interrupts */ |
Alex Deucher | 1b37078 | 2011-11-17 20:13:28 -0500 | [diff] [blame] | 72 | for (i = 0; i < RADEON_NUM_RINGS; i++) |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 73 | atomic_set(&rdev->irq.ring_int[i], 0); |
Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 74 | rdev->irq.gui_idle = false; |
Ilija Hadzic | 54bd520 | 2011-10-26 15:43:58 -0400 | [diff] [blame] | 75 | for (i = 0; i < RADEON_MAX_HPD_PINS; i++) |
Alex Deucher | 9e7b414 | 2010-03-16 17:08:06 -0400 | [diff] [blame] | 76 | rdev->irq.hpd[i] = false; |
Ilija Hadzic | 54bd520 | 2011-10-26 15:43:58 -0400 | [diff] [blame] | 77 | for (i = 0; i < RADEON_MAX_CRTCS; i++) { |
| 78 | rdev->irq.crtc_vblank_int[i] = false; |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 79 | atomic_set(&rdev->irq.pflip[i], 0); |
Alex Deucher | f122c61 | 2012-03-30 08:59:57 -0400 | [diff] [blame] | 80 | rdev->irq.afmt[i] = false; |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 81 | } |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 82 | radeon_irq_set(rdev); |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 83 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 84 | /* Clear bits */ |
| 85 | radeon_irq_process(rdev); |
| 86 | } |
| 87 | |
| 88 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev) |
| 89 | { |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 90 | dev->max_vblank_count = 0x001fffff; |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 91 | return 0; |
| 92 | } |
| 93 | |
| 94 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev) |
| 95 | { |
| 96 | struct radeon_device *rdev = dev->dev_private; |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 97 | unsigned long irqflags; |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 98 | unsigned i; |
| 99 | |
| 100 | if (rdev == NULL) { |
| 101 | return; |
| 102 | } |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 103 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 104 | /* Disable *all* interrupts */ |
Alex Deucher | 1b37078 | 2011-11-17 20:13:28 -0500 | [diff] [blame] | 105 | for (i = 0; i < RADEON_NUM_RINGS; i++) |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 106 | atomic_set(&rdev->irq.ring_int[i], 0); |
Alex Deucher | 2031f77 | 2010-04-22 12:52:11 -0400 | [diff] [blame] | 107 | rdev->irq.gui_idle = false; |
Ilija Hadzic | 54bd520 | 2011-10-26 15:43:58 -0400 | [diff] [blame] | 108 | for (i = 0; i < RADEON_MAX_HPD_PINS; i++) |
Jerome Glisse | 003e69f | 2010-01-07 15:39:14 +0100 | [diff] [blame] | 109 | rdev->irq.hpd[i] = false; |
Ilija Hadzic | 54bd520 | 2011-10-26 15:43:58 -0400 | [diff] [blame] | 110 | for (i = 0; i < RADEON_MAX_CRTCS; i++) { |
| 111 | rdev->irq.crtc_vblank_int[i] = false; |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 112 | atomic_set(&rdev->irq.pflip[i], 0); |
Alex Deucher | f122c61 | 2012-03-30 08:59:57 -0400 | [diff] [blame] | 113 | rdev->irq.afmt[i] = false; |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 114 | } |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 115 | radeon_irq_set(rdev); |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 116 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 117 | } |
| 118 | |
Alex Deucher | 8f6c25c | 2011-10-25 14:58:49 -0400 | [diff] [blame] | 119 | static bool radeon_msi_ok(struct radeon_device *rdev) |
| 120 | { |
| 121 | /* RV370/RV380 was first asic with MSI support */ |
| 122 | if (rdev->family < CHIP_RV380) |
| 123 | return false; |
| 124 | |
| 125 | /* MSIs don't work on AGP */ |
| 126 | if (rdev->flags & RADEON_IS_AGP) |
| 127 | return false; |
| 128 | |
Alex Deucher | a18cee1 | 2011-11-01 14:20:30 -0400 | [diff] [blame] | 129 | /* force MSI on */ |
| 130 | if (radeon_msi == 1) |
| 131 | return true; |
| 132 | else if (radeon_msi == 0) |
| 133 | return false; |
| 134 | |
Alex Deucher | b362105 | 2011-10-25 15:11:08 -0400 | [diff] [blame] | 135 | /* Quirks */ |
| 136 | /* HP RS690 only seems to work with MSIs. */ |
| 137 | if ((rdev->pdev->device == 0x791f) && |
| 138 | (rdev->pdev->subsystem_vendor == 0x103c) && |
| 139 | (rdev->pdev->subsystem_device == 0x30c2)) |
| 140 | return true; |
| 141 | |
Alex Deucher | 01e718e | 2011-11-01 14:14:18 -0400 | [diff] [blame] | 142 | /* Dell RS690 only seems to work with MSIs. */ |
| 143 | if ((rdev->pdev->device == 0x791f) && |
| 144 | (rdev->pdev->subsystem_vendor == 0x1028) && |
Alex Deucher | 44517c4 | 2012-01-15 08:51:12 -0500 | [diff] [blame] | 145 | (rdev->pdev->subsystem_device == 0x01fc)) |
| 146 | return true; |
| 147 | |
| 148 | /* Dell RS690 only seems to work with MSIs. */ |
| 149 | if ((rdev->pdev->device == 0x791f) && |
| 150 | (rdev->pdev->subsystem_vendor == 0x1028) && |
Alex Deucher | 01e718e | 2011-11-01 14:14:18 -0400 | [diff] [blame] | 151 | (rdev->pdev->subsystem_device == 0x01fd)) |
| 152 | return true; |
| 153 | |
Dave Airlie | 16a5e32 | 2012-04-13 11:14:50 +0100 | [diff] [blame] | 154 | /* RV515 seems to have MSI issues where it loses |
| 155 | * MSI rearms occasionally. This leads to lockups and freezes. |
| 156 | * disable it by default. |
| 157 | */ |
| 158 | if (rdev->family == CHIP_RV515) |
| 159 | return false; |
Alex Deucher | 8f6c25c | 2011-10-25 14:58:49 -0400 | [diff] [blame] | 160 | if (rdev->flags & RADEON_IS_IGP) { |
| 161 | /* APUs work fine with MSIs */ |
| 162 | if (rdev->family >= CHIP_PALM) |
| 163 | return true; |
| 164 | /* lots of IGPs have problems with MSIs */ |
| 165 | return false; |
| 166 | } |
| 167 | |
| 168 | return true; |
| 169 | } |
| 170 | |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 171 | int radeon_irq_kms_init(struct radeon_device *rdev) |
| 172 | { |
| 173 | int r = 0; |
| 174 | |
Tejun Heo | 32c87fc | 2011-01-03 14:49:32 +0100 | [diff] [blame] | 175 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); |
Alex Deucher | f122c61 | 2012-03-30 08:59:57 -0400 | [diff] [blame] | 176 | INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); |
Tejun Heo | 32c87fc | 2011-01-03 14:49:32 +0100 | [diff] [blame] | 177 | |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 178 | spin_lock_init(&rdev->irq.lock); |
Alex Deucher | 9e7b414 | 2010-03-16 17:08:06 -0400 | [diff] [blame] | 179 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 180 | if (r) { |
| 181 | return r; |
| 182 | } |
Alex Deucher | 3e5cb98 | 2009-10-16 12:21:24 -0400 | [diff] [blame] | 183 | /* enable msi */ |
| 184 | rdev->msi_enabled = 0; |
Alex Deucher | 8f6c25c | 2011-10-25 14:58:49 -0400 | [diff] [blame] | 185 | |
| 186 | if (radeon_msi_ok(rdev)) { |
Alex Deucher | 3e5cb98 | 2009-10-16 12:21:24 -0400 | [diff] [blame] | 187 | int ret = pci_enable_msi(rdev->pdev); |
Alex Deucher | d8f60cf | 2009-12-01 13:43:46 -0500 | [diff] [blame] | 188 | if (!ret) { |
Alex Deucher | 3e5cb98 | 2009-10-16 12:21:24 -0400 | [diff] [blame] | 189 | rdev->msi_enabled = 1; |
Alex Deucher | da7be68 | 2010-08-12 18:05:34 -0400 | [diff] [blame] | 190 | dev_info(rdev->dev, "radeon: using MSI.\n"); |
Alex Deucher | d8f60cf | 2009-12-01 13:43:46 -0500 | [diff] [blame] | 191 | } |
Alex Deucher | 3e5cb98 | 2009-10-16 12:21:24 -0400 | [diff] [blame] | 192 | } |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 193 | rdev->irq.installed = true; |
Jerome Glisse | 003e69f | 2010-01-07 15:39:14 +0100 | [diff] [blame] | 194 | r = drm_irq_install(rdev->ddev); |
| 195 | if (r) { |
| 196 | rdev->irq.installed = false; |
| 197 | return r; |
| 198 | } |
Jerome Glisse | 771fe6b | 2009-06-05 14:42:42 +0200 | [diff] [blame] | 199 | DRM_INFO("radeon: irq initialized.\n"); |
| 200 | return 0; |
| 201 | } |
| 202 | |
/*
 * radeon_irq_kms_fini - tear down KMS interrupt handling.
 *
 * Order matters here: vblank cleanup first, then the IRQ handler is
 * removed before MSI is disabled, and finally any queued hotplug work
 * is flushed so it cannot run against a torn-down device.
 */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
	}
	/* wait for a pending hotplug work item to finish */
	flush_work_sync(&rdev->hotplug_work);
}
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 214 | |
Alex Deucher | 1b37078 | 2011-11-17 20:13:28 -0500 | [diff] [blame] | 215 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 216 | { |
| 217 | unsigned long irqflags; |
| 218 | |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 219 | if (!rdev->ddev->irq_enabled) |
| 220 | return; |
| 221 | |
| 222 | if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) { |
| 223 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 224 | radeon_irq_set(rdev); |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 225 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 226 | } |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 227 | } |
| 228 | |
Alex Deucher | 1b37078 | 2011-11-17 20:13:28 -0500 | [diff] [blame] | 229 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring) |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 230 | { |
| 231 | unsigned long irqflags; |
| 232 | |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 233 | if (!rdev->ddev->irq_enabled) |
| 234 | return; |
| 235 | |
| 236 | if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) { |
| 237 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 238 | radeon_irq_set(rdev); |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 239 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 240 | } |
Dave Airlie | 1614f8b | 2009-12-01 16:04:56 +1000 | [diff] [blame] | 241 | } |
| 242 | |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 243 | void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) |
| 244 | { |
| 245 | unsigned long irqflags; |
| 246 | |
| 247 | if (crtc < 0 || crtc >= rdev->num_crtc) |
| 248 | return; |
| 249 | |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 250 | if (!rdev->ddev->irq_enabled) |
| 251 | return; |
| 252 | |
| 253 | if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) { |
| 254 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 255 | radeon_irq_set(rdev); |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 256 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 257 | } |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 258 | } |
| 259 | |
| 260 | void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) |
| 261 | { |
| 262 | unsigned long irqflags; |
| 263 | |
| 264 | if (crtc < 0 || crtc >= rdev->num_crtc) |
| 265 | return; |
| 266 | |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 267 | if (!rdev->ddev->irq_enabled) |
| 268 | return; |
| 269 | |
| 270 | if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) { |
| 271 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 272 | radeon_irq_set(rdev); |
Christian Koenig | 736fc37 | 2012-05-17 19:52:00 +0200 | [diff] [blame] | 273 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 274 | } |
Alex Deucher | 6f34be5 | 2010-11-21 10:59:01 -0500 | [diff] [blame] | 275 | } |
| 276 | |
Christian Koenig | fb98257 | 2012-05-17 01:33:30 +0200 | [diff] [blame] | 277 | void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) |
| 278 | { |
| 279 | unsigned long irqflags; |
| 280 | |
| 281 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
| 282 | rdev->irq.afmt[block] = true; |
| 283 | radeon_irq_set(rdev); |
| 284 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
| 285 | |
| 286 | } |
| 287 | |
| 288 | void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block) |
| 289 | { |
| 290 | unsigned long irqflags; |
| 291 | |
| 292 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
| 293 | rdev->irq.afmt[block] = false; |
| 294 | radeon_irq_set(rdev); |
| 295 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
| 296 | } |
| 297 | |
| 298 | void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask) |
| 299 | { |
| 300 | unsigned long irqflags; |
| 301 | int i; |
| 302 | |
| 303 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
| 304 | for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) |
| 305 | rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i)); |
| 306 | radeon_irq_set(rdev); |
| 307 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
| 308 | } |
| 309 | |
| 310 | void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) |
| 311 | { |
| 312 | unsigned long irqflags; |
| 313 | int i; |
| 314 | |
| 315 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
| 316 | for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) |
| 317 | rdev->irq.hpd[i] &= !(hpd_mask & (1 << i)); |
| 318 | radeon_irq_set(rdev); |
| 319 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
| 320 | } |
| 321 | |
/*
 * radeon_irq_kms_wait_gui_idle - enable the GUI idle interrupt and wait
 * for the GPU to report idle, with a RADEON_WAIT_IDLE_TIMEOUT ms timeout.
 *
 * Returns wait_event_timeout()'s result: 0 if the timeout elapsed with
 * radeon_gui_idle() still false, otherwise the remaining jiffies.
 */
int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
{
	unsigned long irqflags;
	int r;

	/* enable the GUI idle interrupt so the handler can wake idle_queue */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);

	r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
			       msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));

	/* disable the interrupt again, whether or not the wait timed out */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}