/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

#define RADEON_WAIT_IDLE_TIMEOUT 200

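/**
 * radeon_driver_irq_handler_kms - IRQ handler for the KMS driver
 *
 * @DRM_IRQ_ARGS: interrupt handler arguments (irq number and an opaque
 * pointer to the drm_device)
 *
 * Main interrupt entry point (all asics).  Hands the interrupt off to
 * the per-asic handler via radeon_irq_process().
 */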
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct radeon_device *rdev = dev->dev_private;

	return radeon_irq_process(rdev);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list, head)
			radeon_connector_hotplug(connector);
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

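/**
 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics): disables every interrupt
 * source on the chip, then runs the interrupt handler once to clear any
 * pending interrupt bits.
 */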
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	unsigned i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* Disable *all* interrupts */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	rdev->irq.gui_idle = false;
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	/* Clear bits */
	radeon_irq_process(rdev);
}

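/**
 * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Called after the irq handler has been installed.  The hardware vblank
 * counter is 21 bits wide, so tell the drm core about it.  Returns 0.
 */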
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	dev->max_vblank_count = 0x001fffff;
	return 0;
}

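/**
 * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
 *
 * @dev: drm dev pointer
 *
 * Disables every interrupt source on the chip (all asics) before the
 * irq handler is removed.
 */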
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	unsigned i;

	if (rdev == NULL) {
		return;
	}
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* Disable *all* interrupts */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	rdev->irq.gui_idle = false;
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

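/**
 * radeon_msi_ok - decide whether MSIs should be enabled
 *
 * @rdev: radeon device pointer
 *
 * Checks whether MSIs are usable on this asic, honouring the radeon_msi
 * module parameter and a small table of per-board quirks.  Returns true
 * if MSIs should be enabled, false otherwise.
 */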
static bool radeon_msi_ok(struct radeon_device *rdev)
{
	/* RV370/RV380 were the first asics with MSI support */
	if (rdev->family < CHIP_RV380)
		return false;

	/* MSIs don't work on AGP */
	if (rdev->flags & RADEON_IS_AGP)
		return false;

	/* force MSI on */
	if (radeon_msi == 1)
		return true;
	else if (radeon_msi == 0)
		return false;

	/* Quirks */
	/* HP RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x103c) &&
	    (rdev->pdev->subsystem_device == 0x30c2))
		return true;

	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fc))
		return true;

	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fd))
		return true;

	/* RV515 seems to occasionally lose MSI rearms, which leads to
	 * lockups and freezes, so disable MSI on it by default.
	 */
	if (rdev->family == CHIP_RV515)
		return false;
	if (rdev->flags & RADEON_IS_IGP) {
		/* APUs work fine with MSIs */
		if (rdev->family >= CHIP_PALM)
			return true;
		/* lots of IGPs have problems with MSIs */
		return false;
	}

	return true;
}

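/**
 * radeon_irq_kms_init - set up driver interrupt handling
 *
 * @rdev: radeon device pointer
 *
 * Initializes the work items, the irq spinlock and drm vblank support,
 * enables MSIs where radeon_msi_ok() allows it, and installs the drm
 * irq handler.  Returns 0 on success, error code on failure.
 */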
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);

	spin_lock_init(&rdev->irq.lock);
	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r) {
		return r;
	}
	/* enable msi */
	rdev->msi_enabled = 0;

	if (radeon_msi_ok(rdev)) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret) {
			rdev->msi_enabled = 1;
			dev_info(rdev->dev, "radeon: using MSI.\n");
		}
	}
	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev);
	if (r) {
		rdev->irq.installed = false;
		return r;
	}
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}

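/**
 * radeon_irq_kms_fini - tear down driver interrupt handling
 *
 * @rdev: radeon device pointer
 *
 * Uninstalls the drm irq handler, disables MSIs if they were enabled,
 * and flushes any pending hotplug work.
 */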
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
	}
	flush_work_sync(&rdev->hotplug_work);
}

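/**
 * radeon_irq_kms_sw_irq_get - enable software interrupts for a ring
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt to enable
 *
 * Takes a reference on the software interrupt of the given ring; the
 * interrupt is actually enabled on the first reference.  No-op while
 * drm irqs are disabled.
 */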
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}

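/**
 * radeon_irq_kms_sw_irq_put - disable software interrupts for a ring
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt to disable
 *
 * Drops a reference on the software interrupt of the given ring; the
 * interrupt is actually disabled when the last reference is gone.
 */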
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}

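/**
 * radeon_irq_kms_pflip_irq_get - enable pageflip interrupts for a crtc
 *
 * @rdev: radeon device pointer
 * @crtc: crtc whose pageflip interrupt to enable
 *
 * Takes a reference on the pageflip interrupt of the given crtc; the
 * interrupt is actually enabled on the first reference.  Invalid crtc
 * indices are silently ignored.
 */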
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}

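/**
 * radeon_irq_kms_pflip_irq_put - disable pageflip interrupts for a crtc
 *
 * @rdev: radeon device pointer
 * @crtc: crtc whose pageflip interrupt to disable
 *
 * Drops a reference on the pageflip interrupt of the given crtc; the
 * interrupt is actually disabled when the last reference is gone.
 */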
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}

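/**
 * radeon_irq_kms_enable_afmt - enable an afmt block interrupt
 *
 * @rdev: radeon device pointer
 * @block: afmt (HDMI audio format) block whose interrupt to enable
 *
 * Enables the status change interrupt for the given afmt block.
 */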
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

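/**
 * radeon_irq_kms_disable_afmt - disable an afmt block interrupt
 *
 * @rdev: radeon device pointer
 * @block: afmt (HDMI audio format) block whose interrupt to disable
 *
 * Disables the status change interrupt for the given afmt block.
 */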
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

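/**
 * radeon_irq_kms_enable_hpd - enable hotplug detect interrupts
 *
 * @rdev: radeon device pointer
 * @hpd_mask: bitmask of hpd pins to enable
 *
 * Enables the hotplug detect interrupt for every hpd pin set in
 * @hpd_mask.
 */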
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

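/**
 * radeon_irq_kms_disable_hpd - disable hotplug detect interrupts
 *
 * @rdev: radeon device pointer
 * @hpd_mask: bitmask of hpd pins to disable
 *
 * Disables the hotplug detect interrupt for every hpd pin set in
 * @hpd_mask.
 */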
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

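/**
 * radeon_irq_kms_wait_gui_idle - wait for the GUI engine to go idle
 *
 * @rdev: radeon device pointer
 *
 * Enables the GUI idle interrupt, then waits up to
 * RADEON_WAIT_IDLE_TIMEOUT ms for radeon_gui_idle() to report idle.
 * Returns the result of wait_event_timeout() (0 on timeout, remaining
 * jiffies otherwise).
 */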
int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
{
	unsigned long irqflags;
	int r;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);

	r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
			       msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}