/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free a PASID from a fence callback */
struct amdgpu_pasid_cb {
        struct dma_fence_cb cb;
        unsigned int pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
        int pasid = -EINVAL;

        for (bits = min(bits, 31U); bits > 0; bits--) {
                pasid = ida_simple_get(&amdgpu_pasid_ida,
                                       1U << (bits - 1), 1U << bits,
                                       GFP_KERNEL);
                if (pasid != -ENOSPC)
                        break;
        }

        if (pasid >= 0)
                trace_amdgpu_pasid_allocated(pasid);

        return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
        trace_amdgpu_pasid_freed(pasid);
        ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
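
/*
 * Usage sketch (hypothetical caller, not part of this file): allocate a
 * 16-bit PASID and release it again. With bits == 16 the allocator first
 * tries the range [1 << 15, 1 << 16) and only falls back to smaller
 * ranges when that one is exhausted:
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 */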

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *_cb)
{
        struct amdgpu_pasid_cb *cb =
                container_of(_cb, struct amdgpu_pasid_cb, cb);

        amdgpu_pasid_free(cb->pasid);
        dma_fence_put(fence);
        kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free PASID when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: PASID to free
 *
 * Free the PASID only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
                               unsigned int pasid)
{
        struct dma_fence *fence, **fences;
        struct amdgpu_pasid_cb *cb;
        unsigned count;
        int r;

        r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
        if (r)
                goto fallback;

        if (count == 0) {
                amdgpu_pasid_free(pasid);
                return;
        }

        if (count == 1) {
                fence = fences[0];
                kfree(fences);
        } else {
                uint64_t context = dma_fence_context_alloc(1);
                struct dma_fence_array *array;

                array = dma_fence_array_create(count, fences, context,
                                               1, false);
                if (!array) {
                        kfree(fences);
                        goto fallback;
                }
                fence = &array->base;
        }

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb) {
                /* Last resort when we are OOM */
                dma_fence_wait(fence, false);
                dma_fence_put(fence);
                amdgpu_pasid_free(pasid);
        } else {
                cb->pasid = pasid;
                if (dma_fence_add_callback(fence, &cb->cb,
                                           amdgpu_pasid_free_cb))
                        amdgpu_pasid_free_cb(fence, &cb->cb);
        }

        return;

fallback:
        /* Not enough memory for the delayed delete; as a last resort,
         * block until all the fences complete.
         */
        reservation_object_wait_timeout_rcu(resv, true, false,
                                            MAX_SCHEDULE_TIMEOUT);
        amdgpu_pasid_free(pasid);
}
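
/*
 * Usage sketch (hypothetical; assumes "root" is the page-directory BO of
 * the VM, whose reservation object may still hold fences from work that
 * references the PASID):
 *
 *	amdgpu_pasid_free_delayed(root->tbo.resv, vm->pasid);
 */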

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id)
{
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);
}
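
/*
 * Usage sketch (hypothetical caller; "job" and "id_mgr" are assumptions,
 * not taken from this file): a submission prepared before a GPU reset
 * must not trust its VMID state anymore:
 *
 *	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
 *
 *	if (amdgpu_vmid_had_gpu_reset(adev, id))
 *		... force a full VM flush for this submission ...
 */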

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait on to the
 * sync object. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
                                 struct amdgpu_ring *ring,
                                 struct amdgpu_sync *sync,
                                 struct amdgpu_vmid **idle)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct dma_fence **fences;
        unsigned i;
        int r;

        if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
                return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);

        fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
        if (!fences)
                return -ENOMEM;

        /* Check if we have an idle VMID */
        i = 0;
        list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
                fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
                if (!fences[i])
                        break;
                ++i;
        }

        /* If we can't find an idle VMID to use, wait till one becomes available */
        if (&(*idle)->list == &id_mgr->ids_lru) {
                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
                struct dma_fence_array *array;
                unsigned j;

                *idle = NULL;
                for (j = 0; j < i; ++j)
                        dma_fence_get(fences[j]);

                array = dma_fence_array_create(i, fences, fence_context,
                                               seqno, true);
                if (!array) {
                        for (j = 0; j < i; ++j)
                                dma_fence_put(fences[j]);
                        kfree(fences);
                        return -ENOMEM;
                }

                r = amdgpu_sync_fence(adev, sync, &array->base, false);
                dma_fence_put(ring->vmid_wait);
                ring->vmid_wait = &array->base;
                return r;
        }
        kfree(fences);

        return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                                     struct amdgpu_ring *ring,
                                     struct amdgpu_sync *sync,
                                     struct dma_fence *fence,
                                     struct amdgpu_job *job,
                                     struct amdgpu_vmid **id)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        bool needs_flush = vm->use_cpu_for_update;
        int r = 0;

        *id = vm->reserved_vmid[vmhub];
        if (updates && (*id)->flushed_updates &&
            updates->context == (*id)->flushed_updates->context &&
            !dma_fence_is_later(updates, (*id)->flushed_updates))
                updates = NULL;

        if ((*id)->owner != vm->entity.fence_context ||
            job->vm_pd_addr != (*id)->pd_gpu_addr ||
            updates || !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
             !dma_fence_is_signaled((*id)->last_flush))) {
                struct dma_fence *tmp;

                /* to prevent one context from being starved by another */
                (*id)->pd_gpu_addr = 0;
                tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
                if (tmp) {
                        *id = NULL;
                        r = amdgpu_sync_fence(adev, sync, tmp, false);
                        return r;
                }
                needs_flush = true;
        }

        /* Good, we can use this VMID. Remember this submission as
         * user of the VMID.
         */
        r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
        if (r)
                return r;

        if (updates) {
                dma_fence_put((*id)->flushed_updates);
                (*id)->flushed_updates = dma_fence_get(updates);
        }
        job->vm_needs_flush = needs_flush;
        return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                                 struct amdgpu_ring *ring,
                                 struct amdgpu_sync *sync,
                                 struct dma_fence *fence,
                                 struct amdgpu_job *job,
                                 struct amdgpu_vmid **id)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        int r;

        job->vm_needs_flush = vm->use_cpu_for_update;

        /* Check if we can use a VMID already assigned to this VM */
        list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
                bool needs_flush = vm->use_cpu_for_update;
                struct dma_fence *flushed;

                /* Check all the prerequisites to using this VMID */
                if ((*id)->owner != vm->entity.fence_context)
                        continue;

                if ((*id)->pd_gpu_addr != job->vm_pd_addr)
                        continue;

                if (!(*id)->last_flush ||
                    ((*id)->last_flush->context != fence_context &&
                     !dma_fence_is_signaled((*id)->last_flush)))
                        needs_flush = true;

                flushed = (*id)->flushed_updates;
                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
                        needs_flush = true;

                /* Concurrent flushes are only possible starting with Vega10 */
                if (adev->asic_type < CHIP_VEGA10 && needs_flush)
                        continue;

                /* Good, we can use this VMID. Remember this submission as
                 * user of the VMID.
                 */
                r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
                if (r)
                        return r;

                if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
                        dma_fence_put((*id)->flushed_updates);
                        (*id)->flushed_updates = dma_fence_get(updates);
                }

                job->vm_needs_flush |= needs_flush;
                return 0;
        }

        *id = NULL;
        return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                     struct amdgpu_sync *sync, struct dma_fence *fence,
                     struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *idle = NULL;
        struct amdgpu_vmid *id = NULL;
        int r = 0;

        mutex_lock(&id_mgr->lock);
        r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
        if (r || !idle)
                goto error;

        if (vm->reserved_vmid[vmhub]) {
                r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
                if (r || !id)
                        goto error;
        } else {
                r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
                if (r)
                        goto error;

                if (!id) {
                        struct dma_fence *updates = sync->last_vm_update;

                        /* Still no ID to use? Then use the idle one found earlier */
                        id = idle;

                        /* Remember this submission as user of the VMID */
                        r = amdgpu_sync_fence(ring->adev, &id->active,
                                              fence, false);
                        if (r)
                                goto error;

                        dma_fence_put(id->flushed_updates);
                        id->flushed_updates = dma_fence_get(updates);
                        job->vm_needs_flush = true;
                }

                list_move_tail(&id->list, &id_mgr->ids_lru);
        }

        id->pd_gpu_addr = job->vm_pd_addr;
        id->owner = vm->entity.fence_context;

        if (job->vm_needs_flush) {
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
        trace_amdgpu_vm_grab_id(vm, ring, job);

error:
        mutex_unlock(&id_mgr->lock);
        return r;
}
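
/*
 * Usage sketch (hypothetical submission path; "vm", "ring", "job" and
 * "fence" are assumed to come from the caller, not from this file):
 *
 *	r = amdgpu_vmid_grab(vm, ring, &job->sync, fence, job);
 *	if (r)
 *		return r;
 *	... job->vmid and job->pasid are now valid for this submission ...
 */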

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr;
        struct amdgpu_vmid *idle;
        int r = 0;

        id_mgr = &adev->vm_manager.id_mgr[vmhub];
        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub])
                goto unlock;
        if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
            AMDGPU_VM_MAX_RESERVED_VMID) {
                DRM_ERROR("Over limit of reserved VMIDs\n");
                atomic_dec(&id_mgr->reserved_vmid_num);
                r = -EINVAL;
                goto unlock;
        }
        /* Select the first VMID in the LRU */
        idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
        list_del_init(&idle->list);
        vm->reserved_vmid[vmhub] = idle;
        mutex_unlock(&id_mgr->lock);

        return 0;
unlock:
        mutex_unlock(&id_mgr->lock);
        return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub]) {
                list_add(&vm->reserved_vmid[vmhub]->list,
                         &id_mgr->ids_lru);
                vm->reserved_vmid[vmhub] = NULL;
                atomic_dec(&id_mgr->reserved_vmid_num);
        }
        mutex_unlock(&id_mgr->lock);
}
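
/*
 * Usage sketch (hypothetical; using AMDGPU_GFXHUB as the vmhub is an
 * assumption, not taken from this file):
 *
 *	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB);
 *	if (r)
 *		return r;
 *	... vm now has a dedicated VMID on that hub ...
 *	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB);
 */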

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: VMHUB the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                       unsigned vmid)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[vmid];

        mutex_lock(&id_mgr->lock);
        id->owner = 0;
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
        id->gws_size = 0;
        id->oa_base = 0;
        id->oa_size = 0;
        mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                for (j = 1; j < id_mgr->num_ids; ++j)
                        amdgpu_vmid_reset(adev, i, j);
        }
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
                atomic_set(&id_mgr->reserved_vmid_num, 0);

                /* skip over VMID 0, since it is the system VM */
                for (j = 1; j < id_mgr->num_ids; ++j) {
                        amdgpu_vmid_reset(adev, i, j);
                        amdgpu_sync_create(&id_mgr->ids[j].active);
                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
                }
        }

        adev->vm_manager.fence_context =
                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_destroy(&id_mgr->lock);
                for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
                        struct amdgpu_vmid *id = &id_mgr->ids[j];

                        amdgpu_sync_free(&id->active);
                        dma_fence_put(id->flushed_updates);
                        dma_fence_put(id->last_flush);
                        dma_fence_put(id->pasid_mapping);
                }
        }
}