/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"

#include <linux/pm_runtime.h>

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
/**
 * amdgpu_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt.  It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	mutex_lock(&mode_config->mutex);
	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list, head)
			amdgpu_connector_hotplug(connector);
	}
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

/**
 * amdgpu_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	amdgpu_gpu_reset(adev);
}

/* Disable *all* interrupts */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src || !src->funcs->set || !src->num_types)
			continue;

		for (j = 0; j < src->num_types; ++j) {
			atomic_set(&src->enabled_types[j], 0);
			r = src->funcs->set(adev, src, j,
					    AMDGPU_IRQ_STATE_DISABLE);
			if (r)
				DRM_ERROR("error disabling interrupt (%d)\n",
					  r);
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_preinstall - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics).
 * This function disables all interrupt sources on the GPU.
 */
void amdgpu_irq_preinstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Disable *all* interrupts */
	amdgpu_irq_disable_all(adev);
	/* Clear bits */
	amdgpu_ih_process(adev);
}

/**
 * amdgpu_irq_postinstall - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles stuff to be done after enabling irqs (all asics).
 * Returns 0 on success.
 */
int amdgpu_irq_postinstall(struct drm_device *dev)
{
	dev->max_vblank_count = 0x00ffffff;
	return 0;
}

/**
 * amdgpu_irq_uninstall - drm irq uninstall callback
 *
 * @dev: drm dev pointer
 *
 * This function disables all interrupt sources on the GPU (all asics).
 */
void amdgpu_irq_uninstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL) {
		return;
	}
	amdgpu_irq_disable_all(adev);
}

/**
 * amdgpu_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: opaque handler argument (the drm device pointer)
 *
 * This is the irq handler for the amdgpu driver (all asics).
 * Returns the result of processing the IH ring buffer
 * (IRQ_HANDLED or IRQ_NONE).
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = dev->dev_private;
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}

/**
 * amdgpu_msi_ok - asic specific msi checks
 *
 * @adev: amdgpu device pointer
 *
 * Handles asic specific MSI checks to determine if
 * MSIs should be enabled on a particular chip (all asics).
 * Returns true if MSIs should be enabled, false if MSIs
 * should not be enabled.
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	/* force MSI on */
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}
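
/*
 * Example (sketch): amdgpu_msi above is the driver's "msi" module
 * parameter.  Assuming it is declared in amdgpu_drv.c, the declaration
 * looks roughly like this (exact permissions and description text may
 * differ):
 *
 *	int amdgpu_msi = -1;
 *	module_param_named(msi, amdgpu_msi, int, 0444);
 *	MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 */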

/**
 * amdgpu_irq_init - init driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);
	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
	if (r) {
		return r;
	}
	/* enable msi */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int ret = pci_enable_msi(adev->pdev);
		if (!ret) {
			adev->irq.msi_enabled = true;
			dev_info(adev->dev, "amdgpu: using MSI.\n");
		}
	}

	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

	adev->irq.installed = true;
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		flush_work(&adev->hotplug_work);
		return r;
	}

	DRM_INFO("amdgpu: irq initialized.\n");
	return 0;
}

/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src)
			continue;

		kfree(src->enabled_types);
		src->enabled_types = NULL;
		if (src->data) {
			kfree(src->data);
			kfree(src);
			adev->irq.sources[i] = NULL;
		}
	}
}

/**
 * amdgpu_irq_add_id - register irq source
 *
 * @adev: amdgpu device pointer
 * @src_id: source id for this source
 * @source: irq source
 *
 * Registers the irq source for the given source id so that
 * amdgpu_irq_dispatch() can route IV entries with that id to it.
 * Returns 0 on success, -EINVAL if the source is invalid or the id
 * is already registered, or -ENOMEM on allocation failure.
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (adev->irq.sources[src_id] != NULL)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.sources[src_id] = source;

	return 0;
}
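
/*
 * Example (sketch): an IP block typically registers its interrupt source
 * from its sw_init callback.  The callback names, the foo_irq member and
 * the src id 42 below are made up for illustration; the funcs table uses
 * the amdgpu_irq_src_funcs layout, where .set programs the hardware
 * enable state and .process handles decoded IV entries.
 *
 *	static const struct amdgpu_irq_src_funcs foo_irq_funcs = {
 *		.set = foo_set_irq_state,
 *		.process = foo_process_irq,
 *	};
 *
 *	static int foo_sw_init(void *handle)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *
 *		adev->foo_irq.num_types = 1;
 *		adev->foo_irq.funcs = &foo_irq_funcs;
 *		return amdgpu_irq_add_id(adev, 42, &adev->foo_irq);
 *	}
 */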

/**
 * amdgpu_irq_dispatch - dispatch irq to IP blocks
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector
 *
 * Dispatches the irq to the different IP blocks
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry)
{
	unsigned src_id = entry->src_id;
	struct amdgpu_irq_src *src;
	int r;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
		return;
	}

	if (adev->irq.virq[src_id]) {
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
	} else {
		src = adev->irq.sources[src_id];
		if (!src) {
			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
			return;
		}

		r = src->funcs->process(adev, src, entry);
		if (r)
			DRM_ERROR("error processing interrupt (%d)\n", r);
	}
}

/**
 * amdgpu_irq_update - update hw interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to update
 *
 * Updates the interrupt state for a specific src (all asics).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* we need to determine after taking the lock, otherwise
	   we might disable just enabled interrupts again */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Enables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_get_delayed - bump the enable refcount without touching hw
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Increments the enable reference count for the interrupt type, but does
 * not program the hardware state.  Returns true if this was the first
 * reference (0 -> 1 transition), false otherwise.
 */
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *src,
			    unsigned type)
{
	if ((type >= src->num_types) || !src->enabled_types)
		return false;
	return atomic_inc_return(&src->enabled_types[type]) == 1;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to disable
 * @type: type of interrupt you want to disable
 *
 * Disables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
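
/*
 * Example (sketch): get/put are reference counted, so callers bracket the
 * period during which they need the interrupt; the hardware is only
 * reprogrammed via amdgpu_irq_update() on the 0 <-> 1 transitions.  The
 * foo_irq source is the hypothetical one from the registration example
 * above, using interrupt type 0.
 *
 *	r = amdgpu_irq_get(adev, &adev->foo_irq, 0);
 *	if (r)
 *		return r;
 *
 *	... submit work and wait for the interrupt to be processed ...
 *
 *	amdgpu_irq_put(adev, &adev->foo_irq, 0);
 */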

/**
 * amdgpu_irq_enabled - test if irq is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to test
 * @type: type of interrupt you want to test
 *
 * Tests if the given interrupt type for the given source is enabled or not.
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

/* generic irq chip and domain used to expose GPU interrupt sources
 * to other drivers (e.g., ACP) */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};

static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

static struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Create an irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Remove the irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}

/**
 * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
 *                             Linux irq
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 * Returns the Linux irq.
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}
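
/*
 * Example (sketch): a client of the irq domain (e.g. an ACP audio driver)
 * maps its IH src id to a Linux irq and then uses the regular kernel irq
 * API.  The src id constant, handler name and device pointer below are
 * made up for illustration.
 *
 *	unsigned irq = amdgpu_irq_create_mapping(adev, FOO_ACP_SRC_ID);
 *
 *	r = request_irq(irq, foo_acp_irq_handler, 0, "amdgpu-acp", foo);
 *	if (r)
 *		return r;
 *	...
 *	free_irq(irq, foo);
 */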