blob: c5eb7a1461cfefd70730dcbc19e9fd9d9b982112 [file] [log] [blame]
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
28#include "drmP.h"
Dave Airlieeb1f8e42010-05-07 06:42:51 +000029#include "drm_crtc_helper.h"
Jerome Glisse771fe6b2009-06-05 14:42:42 +020030#include "radeon_drm.h"
31#include "radeon_reg.h"
Jerome Glisse771fe6b2009-06-05 14:42:42 +020032#include "radeon.h"
33#include "atom.h"
34
Christian Koenigfb982572012-05-17 01:33:30 +020035#define RADEON_WAIT_IDLE_TIMEOUT 200
36
Jerome Glisse771fe6b2009-06-05 14:42:42 +020037irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
38{
39 struct drm_device *dev = (struct drm_device *) arg;
40 struct radeon_device *rdev = dev->dev_private;
41
42 return radeon_irq_process(rdev);
43}
44
Alex Deucherd4877cf2009-12-04 16:56:37 -050045/*
46 * Handle hotplug events outside the interrupt handler proper.
47 */
48static void radeon_hotplug_work_func(struct work_struct *work)
49{
50 struct radeon_device *rdev = container_of(work, struct radeon_device,
51 hotplug_work);
52 struct drm_device *dev = rdev->ddev;
53 struct drm_mode_config *mode_config = &dev->mode_config;
54 struct drm_connector *connector;
55
56 if (mode_config->num_connector) {
57 list_for_each_entry(connector, &mode_config->connector_list, head)
58 radeon_connector_hotplug(connector);
59 }
60 /* Just fire off a uevent and let userspace tell us what to do */
Dave Airlieeb1f8e42010-05-07 06:42:51 +000061 drm_helper_hpd_irq_event(dev);
Alex Deucherd4877cf2009-12-04 16:56:37 -050062}
63
Jerome Glisse771fe6b2009-06-05 14:42:42 +020064void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
65{
66 struct radeon_device *rdev = dev->dev_private;
Christian Koenigfb982572012-05-17 01:33:30 +020067 unsigned long irqflags;
Jerome Glisse771fe6b2009-06-05 14:42:42 +020068 unsigned i;
69
Christian Koenigfb982572012-05-17 01:33:30 +020070 spin_lock_irqsave(&rdev->irq.lock, irqflags);
Jerome Glisse771fe6b2009-06-05 14:42:42 +020071 /* Disable *all* interrupts */
Alex Deucher1b370782011-11-17 20:13:28 -050072 for (i = 0; i < RADEON_NUM_RINGS; i++)
73 rdev->irq.sw_int[i] = false;
Alex Deucher2031f772010-04-22 12:52:11 -040074 rdev->irq.gui_idle = false;
Ilija Hadzic54bd52062011-10-26 15:43:58 -040075 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
Alex Deucher9e7b4142010-03-16 17:08:06 -040076 rdev->irq.hpd[i] = false;
Ilija Hadzic54bd52062011-10-26 15:43:58 -040077 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
78 rdev->irq.crtc_vblank_int[i] = false;
Alex Deucher6f34be52010-11-21 10:59:01 -050079 rdev->irq.pflip[i] = false;
Alex Deucherf122c612012-03-30 08:59:57 -040080 rdev->irq.afmt[i] = false;
Alex Deucher6f34be52010-11-21 10:59:01 -050081 }
Jerome Glisse771fe6b2009-06-05 14:42:42 +020082 radeon_irq_set(rdev);
Christian Koenigfb982572012-05-17 01:33:30 +020083 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
Jerome Glisse771fe6b2009-06-05 14:42:42 +020084 /* Clear bits */
85 radeon_irq_process(rdev);
86}
87
88int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
89{
90 struct radeon_device *rdev = dev->dev_private;
Christian Koenigfb982572012-05-17 01:33:30 +020091 unsigned long irqflags;
Alex Deucher1b370782011-11-17 20:13:28 -050092 unsigned i;
Jerome Glisse771fe6b2009-06-05 14:42:42 +020093
94 dev->max_vblank_count = 0x001fffff;
Christian Koenigfb982572012-05-17 01:33:30 +020095 spin_lock_irqsave(&rdev->irq.lock, irqflags);
Alex Deucher1b370782011-11-17 20:13:28 -050096 for (i = 0; i < RADEON_NUM_RINGS; i++)
97 rdev->irq.sw_int[i] = true;
Jerome Glisse771fe6b2009-06-05 14:42:42 +020098 radeon_irq_set(rdev);
Christian Koenigfb982572012-05-17 01:33:30 +020099 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200100 return 0;
101}
102
103void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
104{
105 struct radeon_device *rdev = dev->dev_private;
Christian Koenigfb982572012-05-17 01:33:30 +0200106 unsigned long irqflags;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200107 unsigned i;
108
109 if (rdev == NULL) {
110 return;
111 }
Christian Koenigfb982572012-05-17 01:33:30 +0200112 spin_lock_irqsave(&rdev->irq.lock, irqflags);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200113 /* Disable *all* interrupts */
Alex Deucher1b370782011-11-17 20:13:28 -0500114 for (i = 0; i < RADEON_NUM_RINGS; i++)
115 rdev->irq.sw_int[i] = false;
Alex Deucher2031f772010-04-22 12:52:11 -0400116 rdev->irq.gui_idle = false;
Ilija Hadzic54bd52062011-10-26 15:43:58 -0400117 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
Jerome Glisse003e69f2010-01-07 15:39:14 +0100118 rdev->irq.hpd[i] = false;
Ilija Hadzic54bd52062011-10-26 15:43:58 -0400119 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
120 rdev->irq.crtc_vblank_int[i] = false;
Alex Deucher6f34be52010-11-21 10:59:01 -0500121 rdev->irq.pflip[i] = false;
Alex Deucherf122c612012-03-30 08:59:57 -0400122 rdev->irq.afmt[i] = false;
Alex Deucher6f34be52010-11-21 10:59:01 -0500123 }
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200124 radeon_irq_set(rdev);
Christian Koenigfb982572012-05-17 01:33:30 +0200125 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200126}
127
Alex Deucher8f6c25c2011-10-25 14:58:49 -0400128static bool radeon_msi_ok(struct radeon_device *rdev)
129{
130 /* RV370/RV380 was first asic with MSI support */
131 if (rdev->family < CHIP_RV380)
132 return false;
133
134 /* MSIs don't work on AGP */
135 if (rdev->flags & RADEON_IS_AGP)
136 return false;
137
Alex Deuchera18cee12011-11-01 14:20:30 -0400138 /* force MSI on */
139 if (radeon_msi == 1)
140 return true;
141 else if (radeon_msi == 0)
142 return false;
143
Alex Deucherb3621052011-10-25 15:11:08 -0400144 /* Quirks */
145 /* HP RS690 only seems to work with MSIs. */
146 if ((rdev->pdev->device == 0x791f) &&
147 (rdev->pdev->subsystem_vendor == 0x103c) &&
148 (rdev->pdev->subsystem_device == 0x30c2))
149 return true;
150
Alex Deucher01e718e2011-11-01 14:14:18 -0400151 /* Dell RS690 only seems to work with MSIs. */
152 if ((rdev->pdev->device == 0x791f) &&
153 (rdev->pdev->subsystem_vendor == 0x1028) &&
Alex Deucher44517c42012-01-15 08:51:12 -0500154 (rdev->pdev->subsystem_device == 0x01fc))
155 return true;
156
157 /* Dell RS690 only seems to work with MSIs. */
158 if ((rdev->pdev->device == 0x791f) &&
159 (rdev->pdev->subsystem_vendor == 0x1028) &&
Alex Deucher01e718e2011-11-01 14:14:18 -0400160 (rdev->pdev->subsystem_device == 0x01fd))
161 return true;
162
Dave Airlie16a5e322012-04-13 11:14:50 +0100163 /* RV515 seems to have MSI issues where it loses
164 * MSI rearms occasionally. This leads to lockups and freezes.
165 * disable it by default.
166 */
167 if (rdev->family == CHIP_RV515)
168 return false;
Alex Deucher8f6c25c2011-10-25 14:58:49 -0400169 if (rdev->flags & RADEON_IS_IGP) {
170 /* APUs work fine with MSIs */
171 if (rdev->family >= CHIP_PALM)
172 return true;
173 /* lots of IGPs have problems with MSIs */
174 return false;
175 }
176
177 return true;
178}
179
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200180int radeon_irq_kms_init(struct radeon_device *rdev)
181{
182 int r = 0;
183
Tejun Heo32c87fc2011-01-03 14:49:32 +0100184 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
Alex Deucherf122c612012-03-30 08:59:57 -0400185 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
Tejun Heo32c87fc2011-01-03 14:49:32 +0100186
Christian Koenigfb982572012-05-17 01:33:30 +0200187 spin_lock_init(&rdev->irq.lock);
Alex Deucher9e7b4142010-03-16 17:08:06 -0400188 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200189 if (r) {
190 return r;
191 }
Alex Deucher3e5cb982009-10-16 12:21:24 -0400192 /* enable msi */
193 rdev->msi_enabled = 0;
Alex Deucher8f6c25c2011-10-25 14:58:49 -0400194
195 if (radeon_msi_ok(rdev)) {
Alex Deucher3e5cb982009-10-16 12:21:24 -0400196 int ret = pci_enable_msi(rdev->pdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -0500197 if (!ret) {
Alex Deucher3e5cb982009-10-16 12:21:24 -0400198 rdev->msi_enabled = 1;
Alex Deucherda7be682010-08-12 18:05:34 -0400199 dev_info(rdev->dev, "radeon: using MSI.\n");
Alex Deucherd8f60cf2009-12-01 13:43:46 -0500200 }
Alex Deucher3e5cb982009-10-16 12:21:24 -0400201 }
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200202 rdev->irq.installed = true;
Jerome Glisse003e69f2010-01-07 15:39:14 +0100203 r = drm_irq_install(rdev->ddev);
204 if (r) {
205 rdev->irq.installed = false;
206 return r;
207 }
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200208 DRM_INFO("radeon: irq initialized.\n");
209 return 0;
210}
211
212void radeon_irq_kms_fini(struct radeon_device *rdev)
213{
Jerome Glisse003e69f2010-01-07 15:39:14 +0100214 drm_vblank_cleanup(rdev->ddev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200215 if (rdev->irq.installed) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200216 drm_irq_uninstall(rdev->ddev);
Jerome Glisse003e69f2010-01-07 15:39:14 +0100217 rdev->irq.installed = false;
Alex Deucher3e5cb982009-10-16 12:21:24 -0400218 if (rdev->msi_enabled)
219 pci_disable_msi(rdev->pdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200220 }
Tejun Heo32c87fc2011-01-03 14:49:32 +0100221 flush_work_sync(&rdev->hotplug_work);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200222}
Dave Airlie1614f8b2009-12-01 16:04:56 +1000223
Alex Deucher1b370782011-11-17 20:13:28 -0500224void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
Dave Airlie1614f8b2009-12-01 16:04:56 +1000225{
226 unsigned long irqflags;
227
Christian Koenigfb982572012-05-17 01:33:30 +0200228 spin_lock_irqsave(&rdev->irq.lock, irqflags);
Alex Deucher1b370782011-11-17 20:13:28 -0500229 if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
230 rdev->irq.sw_int[ring] = true;
Dave Airlie1614f8b2009-12-01 16:04:56 +1000231 radeon_irq_set(rdev);
232 }
Christian Koenigfb982572012-05-17 01:33:30 +0200233 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
Dave Airlie1614f8b2009-12-01 16:04:56 +1000234}
235
Alex Deucher1b370782011-11-17 20:13:28 -0500236void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
Dave Airlie1614f8b2009-12-01 16:04:56 +1000237{
238 unsigned long irqflags;
239
Christian Koenigfb982572012-05-17 01:33:30 +0200240 spin_lock_irqsave(&rdev->irq.lock, irqflags);
Alex Deucher1b370782011-11-17 20:13:28 -0500241 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
242 if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
243 rdev->irq.sw_int[ring] = false;
Dave Airlie1614f8b2009-12-01 16:04:56 +1000244 radeon_irq_set(rdev);
245 }
Christian Koenigfb982572012-05-17 01:33:30 +0200246 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
Dave Airlie1614f8b2009-12-01 16:04:56 +1000247}
248
Alex Deucher6f34be52010-11-21 10:59:01 -0500249void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
250{
251 unsigned long irqflags;
252
253 if (crtc < 0 || crtc >= rdev->num_crtc)
254 return;
255
Christian Koenigfb982572012-05-17 01:33:30 +0200256 spin_lock_irqsave(&rdev->irq.lock, irqflags);
Alex Deucher6f34be52010-11-21 10:59:01 -0500257 if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
258 rdev->irq.pflip[crtc] = true;
259 radeon_irq_set(rdev);
260 }
Christian Koenigfb982572012-05-17 01:33:30 +0200261 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
Alex Deucher6f34be52010-11-21 10:59:01 -0500262}
263
264void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
265{
266 unsigned long irqflags;
267
268 if (crtc < 0 || crtc >= rdev->num_crtc)
269 return;
270
Christian Koenigfb982572012-05-17 01:33:30 +0200271 spin_lock_irqsave(&rdev->irq.lock, irqflags);
Alex Deucher6f34be52010-11-21 10:59:01 -0500272 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
273 if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
274 rdev->irq.pflip[crtc] = false;
275 radeon_irq_set(rdev);
276 }
Christian Koenigfb982572012-05-17 01:33:30 +0200277 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
Alex Deucher6f34be52010-11-21 10:59:01 -0500278}
279
Christian Koenigfb982572012-05-17 01:33:30 +0200280void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
281{
282 unsigned long irqflags;
283
284 spin_lock_irqsave(&rdev->irq.lock, irqflags);
285 rdev->irq.afmt[block] = true;
286 radeon_irq_set(rdev);
287 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
288
289}
290
291void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
292{
293 unsigned long irqflags;
294
295 spin_lock_irqsave(&rdev->irq.lock, irqflags);
296 rdev->irq.afmt[block] = false;
297 radeon_irq_set(rdev);
298 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
299}
300
301void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
302{
303 unsigned long irqflags;
304 int i;
305
306 spin_lock_irqsave(&rdev->irq.lock, irqflags);
307 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
308 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
309 radeon_irq_set(rdev);
310 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
311}
312
313void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
314{
315 unsigned long irqflags;
316 int i;
317
318 spin_lock_irqsave(&rdev->irq.lock, irqflags);
319 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
320 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
321 radeon_irq_set(rdev);
322 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
323}
324
325int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
326{
327 unsigned long irqflags;
328 int r;
329
330 spin_lock_irqsave(&rdev->irq.lock, irqflags);
331 rdev->irq.gui_idle = true;
332 radeon_irq_set(rdev);
333 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
334
335 r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
336 msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
337
338 spin_lock_irqsave(&rdev->irq.lock, irqflags);
339 rdev->irq.gui_idle = false;
340 radeon_irq_set(rdev);
341 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
342 return r;
343}