/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, but rather
 * than there being a single global GART table for the entire GPU,
 * there are multiple VM page tables active at any given time.
 * The VM page tables can contain a mix of VRAM pages and system
 * memory pages, and system memory pages can be mapped as snooped
 * (cached system pages) or unsnooped (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
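
/*
 * Illustrative sketch (not used by the driver): how this file splits a
 * GPU page frame number between the page directory and a page table.
 * This mirrors the shifts and masks used in amdgpu_vm_update_ptes()
 * below, where amdgpu_vm_block_size is the number of address bits
 * covered by a single page table:
 *
 *	pt_idx  = pfn >> amdgpu_vm_block_size;      (which page table)
 *	pte_idx = pfn & (AMDGPU_VM_PTE_COUNT - 1);  (entry within it)
 *
 * The page directory therefore needs max_pfn >> amdgpu_vm_block_size
 * entries, which is exactly what amdgpu_vm_num_pdes() computes.
 */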

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
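
/*
 * Worked example with illustrative values: for a 4GB address space with
 * 4KB pages, max_pfn is 1 << 20; with a hypothetical block size of 9
 * bits that gives 1 << 11 = 2048 PDEs, and at 8 bytes per entry the
 * directory occupies 16KB.  Both values are configurable, so the
 * numbers here are only an example.
 */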

/**
 * amdgpu_vm_get_bos - add the vm BOs to a validation list
 *
 * @adev: amdgpu_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					       struct amdgpu_vm *vm,
					       struct list_head *head)
{
	struct amdgpu_bo_list_entry *list;
	unsigned i, idx;

	mutex_lock(&vm->mutex);
	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct amdgpu_bo_list_entry));
	if (!list) {
		mutex_unlock(&vm->mutex);
		return NULL;
	}

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].priority = 0;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].priority = 0;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list_add(&list[idx++].tv.head, head);
	}
	mutex_unlock(&vm->mutex);

	return list;
}
126
127/**
128 * amdgpu_vm_grab_id - allocate the next free VMID
129 *
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400130 * @vm: vm to allocate id for
Christian König7f8a5292015-07-20 16:09:40 +0200131 * @ring: ring we want to submit job to
132 * @sync: sync object where we add dependencies
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400133 *
Christian König7f8a5292015-07-20 16:09:40 +0200134 * Allocate an id for the vm, adding fences to the sync obj as necessary.
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400135 *
Christian König7f8a5292015-07-20 16:09:40 +0200136 * Global mutex must be locked!
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400137 */
Christian König7f8a5292015-07-20 16:09:40 +0200138int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
139 struct amdgpu_sync *sync)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400140{
141 struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
142 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
143 struct amdgpu_device *adev = ring->adev;
144
145 unsigned choices[2] = {};
146 unsigned i;
147
148 /* check if the id is still valid */
149 if (vm_id->id && vm_id->last_id_use &&
150 vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
Christian König7f8a5292015-07-20 16:09:40 +0200151 return 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400152
153 /* we definately need to flush */
154 vm_id->pd_gpu_addr = ~0ll;
155
156 /* skip over VMID 0, since it is the system VM */
157 for (i = 1; i < adev->vm_manager.nvm; ++i) {
158 struct amdgpu_fence *fence = adev->vm_manager.active[i];
159
160 if (fence == NULL) {
161 /* found a free one */
162 vm_id->id = i;
163 trace_amdgpu_vm_grab_id(i, ring->idx);
Christian König7f8a5292015-07-20 16:09:40 +0200164 return 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400165 }
166
167 if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
168 best[fence->ring->idx] = fence;
169 choices[fence->ring == ring ? 0 : 1] = i;
170 }
171 }
172
173 for (i = 0; i < 2; ++i) {
174 if (choices[i]) {
Christian König7f8a5292015-07-20 16:09:40 +0200175 struct amdgpu_fence *fence;
176
177 fence = adev->vm_manager.active[choices[i]];
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400178 vm_id->id = choices[i];
Christian König7f8a5292015-07-20 16:09:40 +0200179
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400180 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
Christian König7f8a5292015-07-20 16:09:40 +0200181 return amdgpu_sync_fence(ring->adev, sync, &fence->base);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400182 }
183 }
184
185 /* should never happen */
186 BUG();
Christian König7f8a5292015-07-20 16:09:40 +0200187 return -EINVAL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400188}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *updates)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct amdgpu_fence *flushed_updates = vm_id->flushed_updates;

	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
	    (updates && amdgpu_fence_is_earlier(flushed_updates, updates))) {

		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
		vm_id->flushed_updates = amdgpu_fence_ref(
			amdgpu_fence_later(flushed_updates, updates));
		amdgpu_fence_unref(&flushed_updates);
		vm_id->pd_gpu_addr = pd_addr;
		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * amdgpu_vm_fence - remember fence for vm
 *
 * @adev: amdgpu_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *fence)
{
	unsigned ridx = fence->ring->idx;
	unsigned vm_id = vm->ids[ridx].id;

	amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
	adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);

	amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
	vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
}
246
247/**
248 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
249 *
250 * @vm: requested vm
251 * @bo: requested buffer object
252 *
253 * Find @bo inside the requested vm (cayman+).
254 * Search inside the @bos vm list for the requested vm
255 * Returns the found bo_va or NULL if none is found
256 *
257 * Object has to be reserved!
258 */
259struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
260 struct amdgpu_bo *bo)
261{
262 struct amdgpu_bo_va *bo_va;
263
264 list_for_each_entry(bo_va, &bo->va, bo_list) {
265 if (bo_va->vm == vm) {
266 return bo_va;
267 }
268 }
269 return NULL;
270}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 * @gtt_flags: GTT hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags, uint32_t gtt_flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

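	/*
	 * Three strategies, chosen by the mapping: copy PTEs straight
	 * out of the GART table when the pages are system memory with
	 * unchanged flags, write the PTEs inline into the IB for small
	 * or scattered system updates, and otherwise have the ASIC
	 * generate the contiguous PTEs/PDEs itself.
	 */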
	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
		amdgpu_vm_write_pte(adev, ib, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}

static int amdgpu_vm_free_job(
	struct amdgpu_cs_parser *sched_job)
{
	int i;
	for (i = 0; i < sched_job->num_ibs; i++)
		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
	kfree(sched_job->ibs);
	return 0;
}

static int amdgpu_vm_run_job(
	struct amdgpu_cs_parser *sched_job)
{
	amdgpu_bo_fence(sched_job->job_param.vm.bo,
			&sched_job->ibs[sched_job->num_ibs - 1].fence->base, true);
	return 0;
}

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @bo: bo to clear
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_cs_parser *sched_job = NULL;
	struct amdgpu_ib *ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto error_unreserve;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error_unreserve;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto error_unreserve;
	}

	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
	if (r)
		goto error_free;

	ib->length_dw = 0;

	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > 64);

	if (amdgpu_enable_scheduler) {
		uint64_t v_seq;
		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
						    adev->kernel_ctx, ib, 1);
		if (!sched_job) {
			r = -ENOMEM;
			goto error_free;
		}
		sched_job->job_param.vm.bo = bo;
		sched_job->run_job = amdgpu_vm_run_job;
		sched_job->free_job = amdgpu_vm_free_job;
		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
		ib->sequence = v_seq;
		amd_sched_push_job(ring->scheduler,
				   &adev->kernel_ctx->rings[ring->idx].c_entity,
				   sched_job);
		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
					v_seq,
					false,
					-1);
		if (r)
			DRM_ERROR("emit timeout\n");

		amdgpu_bo_unreserve(bo);
		return 0;
	} else {
		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
		if (r)
			goto error_free;
		amdgpu_bo_fence(bo, &ib->fence->base, true);
	}

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);

error_unreserve:
	amdgpu_bo_unreserve(bo);
	return r;
}


/**
 * amdgpu_vm_map_gart - get the physical address of a gart page
 *
 * @adev: amdgpu_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);
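
	/*
	 * Worked example with illustrative values: with 4KB CPU pages
	 * (PAGE_SHIFT == 12), addr 0x12345 looks up pages_addr[0x12] and
	 * keeps the in-page offset 0x345, so the returned value points
	 * at the right physical byte even when CPU and GPU page sizes
	 * differ.
	 */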

	return result;
}

/**
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_ib *ib;
	struct amdgpu_cs_parser *sched_job = NULL;

	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}
	ib->length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, ib, last_pde,
						       last_pt, count, incr,
						       AMDGPU_PTE_VALID, 0);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
				       incr, AMDGPU_PTE_VALID, 0);

	if (ib->length_dw != 0) {
		amdgpu_vm_pad_ib(adev, ib);
		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib->length_dw > ndw);

		if (amdgpu_enable_scheduler) {
			uint64_t v_seq;
			sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
							    adev->kernel_ctx,
							    ib, 1);
			if (!sched_job)
				goto error_free;
			sched_job->job_param.vm.bo = pd;
			sched_job->run_job = amdgpu_vm_run_job;
			sched_job->free_job = amdgpu_vm_free_job;
			v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
			ib->sequence = v_seq;
			amd_sched_push_job(ring->scheduler,
					   &adev->kernel_ctx->rings[ring->idx].c_entity,
					   sched_job);
			r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
						v_seq,
						false,
						-1);
			if (r)
				DRM_ERROR("emit timeout\n");
		} else {
			r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
			if (r) {
				amdgpu_ib_free(adev, ib);
				kfree(ib);
				return r;
			}
			amdgpu_bo_fence(pd, &ib->fence->base, true);
		}
	}

	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}

	return 0;

error_free:
	kfree(sched_job);
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return -ENOMEM;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 * @gtt_flags: GTT hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags,
				uint32_t gtt_flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;
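
	/*
	 * The constants above, spelled out: frag_align is in bytes of
	 * PTEs, so 0x80 bytes / 8 bytes per PTE = 16 entries, and
	 * 16 entries * 4KB per page = 64KB, matching the
	 * AMDGPU_PTE_FRAG_64KB fragment size (frag = 4,
	 * 1 << (12 + 4) = 64KB).
	 */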

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not contiguous, so no fragments */
	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
			       gtt_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags,
				 uint32_t gtt_flags)
{
	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

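		/* walk in page-table-sized chunks: either the mapping
		 * ends inside this table or we fill it to its boundary */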
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pte = amdgpu_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				amdgpu_vm_frag_ptes(adev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags,
						    gtt_flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	if (count) {
		amdgpu_vm_frag_ptes(adev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags, gtt_flags);
	}

	return 0;
}

/**
 * amdgpu_vm_fence_pts - fence page tables after an update
 *
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @fence: fence to use
 *
 * Fence the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
				uint64_t start, uint64_t end,
				struct fence *fence)
{
	unsigned i;

	start >>= amdgpu_vm_block_size;
	end >>= amdgpu_vm_block_size;

	for (i = start; i <= end; ++i)
		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
}

static int amdgpu_vm_bo_update_mapping_run_job(
	struct amdgpu_cs_parser *sched_job)
{
	struct fence **fence = sched_job->job_param.vm_mapping.fence;
	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
			    sched_job->job_param.vm_mapping.start,
			    sched_job->job_param.vm_mapping.last + 1,
			    &sched_job->ibs[sched_job->num_ibs - 1].fence->base);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(&sched_job->ibs[sched_job->num_ibs - 1].fence->base);
	}
	return 0;
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @gtt_flags: flags as they are used for GTT
 * @fence: optional resulting fence
 *
 * Fill in the page table entries for @mapping.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_bo_va_mapping *mapping,
				       uint64_t addr, uint32_t gtt_flags,
				       struct fence **fence)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	unsigned nptes, ncmds, ndw;
	uint32_t flags = gtt_flags;
	struct amdgpu_ib *ib;
	struct amdgpu_cs_parser *sched_job = NULL;
	int r;

	/* normally bo_va->flags should only contain the READABLE and
	 * WRITEABLE bits, but we filter the flags here just in case
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	nptes = mapping->it.last - mapping->it.start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & AMDGPU_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}

	ib->length_dw = 0;

	if (!(flags & AMDGPU_PTE_VALID)) {
		unsigned i;

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_fence *f = vm->ids[i].last_id_use;
			r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
			if (r) {
				amdgpu_ib_free(adev, ib);
				kfree(ib);
				return r;
			}
		}
	}

	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
				  mapping->it.last + 1, addr + mapping->offset,
				  flags, gtt_flags);

	if (r) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
		return r;
	}

	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > ndw);

	if (amdgpu_enable_scheduler) {
		uint64_t v_seq;
		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
						    adev->kernel_ctx, ib, 1);
		if (!sched_job)
			goto error_free;
		sched_job->job_param.vm_mapping.vm = vm;
		sched_job->job_param.vm_mapping.start = mapping->it.start;
		sched_job->job_param.vm_mapping.last = mapping->it.last;
		sched_job->job_param.vm_mapping.fence = fence;
		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
		sched_job->free_job = amdgpu_vm_free_job;
		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
		ib->sequence = v_seq;
		amd_sched_push_job(ring->scheduler,
				   &adev->kernel_ctx->rings[ring->idx].c_entity,
				   sched_job);
		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
					v_seq,
					false,
					-1);
		if (r)
			DRM_ERROR("emit timeout\n");
	} else {
		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
		if (r) {
			amdgpu_ib_free(adev, ib);
			kfree(ib);
			return r;
		}

		amdgpu_vm_fence_pts(vm, mapping->it.start,
				    mapping->it.last + 1, &ib->fence->base);
		if (fence) {
			fence_put(*fence);
			*fence = fence_get(&ib->fence->base);
		}

		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}
	return 0;

error_free:
	kfree(sched_job);
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return -ENOMEM;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	uint32_t flags;
	uint64_t addr;
	int r;

	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_TT)
			addr += adev->vm_manager.vram_base_offset;
	} else {
		addr = 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
						flags, &bo_va->last_pt_update);
		if (r)
			return r;
	}

	spin_lock(&vm->status_lock);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last page table update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	/* make sure object fits at this offset */
	eaddr = saddr + size;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn > adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	mutex_lock(&vm->mutex);

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		amdgpu_bo_unreserve(bo_va->bo);
		r = -EINVAL;
		goto error_unlock;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		amdgpu_bo_unreserve(bo_va->bo);
		r = -ENOMEM;
		goto error_unlock;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr - 1;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	interval_tree_insert(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_map(bo_va, mapping);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	amdgpu_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct amdgpu_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
		if (r)
			goto error_free;

		r = amdgpu_vm_clear_bo(adev, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			amdgpu_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return 0;

error_free:
	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error_unlock:
	mutex_unlock(&vm->mutex);
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids) {
			amdgpu_bo_unreserve(bo_va->bo);
			return -ENOENT;
		}
	}

	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);
	mutex_unlock(&vm->mutex);
	amdgpu_bo_unreserve(bo_va->bo);

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);

	mutex_unlock(&vm->mutex);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
		vm->ids[i].last_id_use = NULL;
	}
	mutex_init(&vm->mutex);
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0,
			     NULL, &vm->page_directory);
	if (r)
		return r;

	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		amdgpu_fence_unref(&vm->ids[i].flushed_updates);
		amdgpu_fence_unref(&vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
}