/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free a PASID from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	unsigned int pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

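/*
 * Example: amdgpu_pasid_alloc(16) first tries to take a PASID from
 * [1 << 15, 1 << 16); only if that range is exhausted does it fall back
 * to [1 << 14, 1 << 15) and so on down to [1, 2), leaving the small
 * PASIDs available for devices that can only handle fewer bits.
 */
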
/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
			       unsigned int pasid)
{
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
	unsigned count;
	int r;

	r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
	if (r)
		goto fallback;

	if (count == 0) {
		amdgpu_pasid_free(pasid);
		return;
	}

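	/*
	 * With exactly one fence we can attach the callback to it directly;
	 * with several fences they are wrapped in a dma_fence_array so that
	 * a single callback fires once all of them have signaled.
	 */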
	if (count == 1) {
		fence = fences[0];
		kfree(fences);
	} else {
		uint64_t context = dma_fence_context_alloc(1);
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences, context,
					       1, false);
		if (!array) {
			kfree(fences);
			goto fallback;
		}
		fence = &array->base;
	}

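	/*
	 * Attach a callback that frees the PASID once the fence signals. If
	 * dma_fence_add_callback() reports the fence as already signaled we
	 * run the callback ourselves; without memory for the callback we
	 * simply block on the fence instead.
	 */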
	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block until all the fences have completed.
	 */
	reservation_object_wait_timeout_rcu(resv, true, false,
					    MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

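/*
 * Typical use (a sketch, not taken from this file): a VM teardown path
 * that must not recycle the PASID while the GPU might still fault with
 * it could do
 *
 *	amdgpu_pasid_free_delayed(root_bo->tbo.resv, vm->pasid);
 *
 * where root_bo stands for the root page-directory BO whose reservation
 * object collects the fences of all work still using the VM.
 */
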
/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* id_mgr->lock must be held */
static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
					    struct amdgpu_ring *ring,
					    struct amdgpu_sync *sync,
					    struct dma_fence *fence,
					    struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence *updates = sync->last_vm_update;
	int r = 0;
	struct dma_fence *flushed, *tmp;
	bool needs_flush = vm->use_cpu_for_update;

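	/*
	 * The reserved VMID still needs a flush when a GPU reset happened,
	 * it was last used by another VM or page directory, there are VM
	 * updates it has not been flushed for yet, or it has no usable last
	 * flush (none recorded, or one from another ring that has not
	 * signaled yet).
	 */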
	flushed = id->flushed_updates;
	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
	    (job->vm_pd_addr != id->pd_gpu_addr) ||
	    (updates && (!flushed || updates->context != flushed->context ||
			 dma_fence_is_later(updates, flushed))) ||
	    (!id->last_flush || (id->last_flush->context != fence_context &&
				 !dma_fence_is_signaled(id->last_flush)))) {
		needs_flush = true;
		/* to prevent one context from being starved by another */
		id->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&id->active, ring);
		if (tmp) {
			r = amdgpu_sync_fence(adev, sync, tmp, false);
			return r;
		}
	}


	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
	if (r)
		goto out;

	if (updates && (!flushed || updates->context != flushed->context ||
			dma_fence_is_later(updates, flushed))) {
		dma_fence_put(id->flushed_updates);
		id->flushed_updates = dma_fence_get(updates);
	}
	id->pd_gpu_addr = job->vm_pd_addr;
	atomic64_set(&id->owner, vm->entity.fence_context);
	job->vm_needs_flush = needs_flush;
	if (needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
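	/* The VMID number is simply the index of the entry in the per-hub
	 * ID array.
	 */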
	job->vmid = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);
out:
	return r;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	struct amdgpu_vmid *id, *idle;
	struct dma_fence **fences;
	unsigned i;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}
	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences) {
		mutex_unlock(&id_mgr->lock);
		return -ENOMEM;
	}
	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait until one becomes available */
	if (&idle->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

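		/*
		 * dma_fence_array_create() takes over the fences array and
		 * the references taken above; signal_on_any=true means the
		 * array signals as soon as the first busy VMID becomes
		 * available again.
		 */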
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
		dma_fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&id_mgr->lock);
		return 0;
	}
	kfree(fences);

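	/*
	 * CPU-based page table updates are not tracked by ring fences, so
	 * assume a flush is needed whenever the VM is updated by the CPU.
	 */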
	job->vm_needs_flush = vm->use_cpu_for_update;
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
		struct dma_fence *flushed;
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if (amdgpu_vmid_had_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->entity.fence_context)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush ||
		    (id->last_flush->context != fence_context &&
		     !dma_fence_is_signaled(id->last_flush)))
			needs_flush = true;

		flushed = id->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 */
		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
		if (r)
			goto error;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
		}

		if (needs_flush)
			goto needs_flush;
		else
			goto no_flush_needed;

	}

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
	if (r)
		goto error;

	id->pd_gpu_addr = job->vm_pd_addr;
	dma_fence_put(id->flushed_updates);
	id->flushed_updates = dma_fence_get(updates);
	atomic64_set(&id->owner, vm->entity.fence_context);

needs_flush:
	job->vm_needs_flush = true;
	dma_fence_put(id->last_flush);
	id->last_flush = NULL;

no_flush_needed:
	list_move_tail(&id->list, &id_mgr->ids_lru);

	job->vmid = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limit of reserved VMIDs\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Take the first (least recently used) VMID from the LRU list */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}

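/*
 * Reserved VMIDs are expected to be used in pairs, e.g. (a sketch of the
 * VM ioctl path, not part of this file):
 *
 *	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB);
 *	...
 *	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB);
 *
 * as done for the AMDGPU_VM_OP_RESERVE_VMID/UNRESERVE_VMID operations.
 */
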
/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	atomic64_set(&id->owner, 0);
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VMID manager structures.
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Clean up the VMID manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}